/*
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
                                    uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0478C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;

    return cs;
}

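/*
 * raise_exception() exits the CPU loop immediately; the _ra variant
 * additionally restores guest CPU state from the host return address
 * 'ra' first, for use when called directly from translated code.
 */
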
void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit_restore(cs, ra);
}

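/*
 * Table-lookup helper for the Neon VTBL/VTBX instructions: each byte of
 * 'ireg' indexes a byte in the table at 'vn'; out-of-range indexes take
 * the corresponding byte from 'def' instead.
 */
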
uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
                          uint32_t maxindex)
{
    uint32_t val = 0;
    uint32_t shift;
    uint64_t *table = vn;

    for (shift = 0; shift < 32; shift += 8) {
        uint32_t index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        CPUState *cs = env_cpu(env);

        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use cpu_restore_state() to
         * get them right before raising the exception.
         */
        cpu_restore_state(cs, GETPC(), true);
        raise_exception(env, EXCP_STKOF, 0, 1);
    }
}

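/*
 * The helpers below implement the A32/T32 saturating arithmetic
 * instructions (QADD, QSUB and friends): on overflow they set the
 * sticky CPSR.Q flag via env->QF and, where required, clamp the result.
 */
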
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

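/*
 * For example, with shift = 8, do_ssat() clamps to [-256, 255] and
 * do_usat() clamps to [0, 255], setting QF whenever clamping occurs.
 */
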
/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

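/* SETEND: toggle the CPSR.E (data endianness) bit. */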
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}

/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        if (env->aarch64) {
            env->pc -= insn_len;
        } else {
            env->regs[15] -= insn_len;
        }

        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/* Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env));
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

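/*
 * Generic coprocessor/system register read and write helpers. Registers
 * marked ARM_CP_IO may touch device or timer state, so their readfn and
 * writefn callbacks must run with the iothread mutex (BQL) held.
 */
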
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     *  -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *  -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *  -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     * - PSCI conduit is SMC but we don't have a valid PSCI call,
     * - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

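/*
 * The functions below implement architectural debug support: they decide
 * whether a QEMU breakpoint or watchpoint hit also matches the guest's
 * DBGBCR/DBGWCR configuration and, if so, raise the corresponding debug
 * exception.
 */
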
/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr;
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /*
     * Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /*
     * We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /*
         * Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }
}

static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /*
     * Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /*
             * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /*
     * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /*
     * If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /*
     * If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void HELPER(check_breakpoints)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    if (check_breakpoints(cpu)) {
        HELPER(exception_internal)(env, EXCP_DEBUG);
    }
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /*
     * Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}

void arm_debug_excp_handler(CPUState *cs)
{
    /*
     * Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /*
         * (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /*
         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        x = 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        x = x << shift;
    }
    return x;
}

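/*
 * For example, shl_cc(env, 0x80000001, 1) returns 0x00000002 and sets CF
 * to the bit shifted out (bit 31 here, so CF = 1).
 */
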
uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        x = 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        x = x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;

    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}

void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /*
     * Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */

    ARMCPU *cpu = env_archcpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /*
         * Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable. So in practice the hostaddr[] array has
         * two entries, given the current setting of TARGET_PAGE_BITS_MIN.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[DIV_ROUND_UP(2 * KiB, 1 << TARGET_PAGE_BITS_MIN)];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        assert(maxidx <= ARRAY_SIZE(hostaddr));

        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /*
                 * If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /*
             * OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /*
         * Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         *  + we'd need to account for the blocksize being larger than a page
         *  + the direct-RAM access case is almost always going to be dealt
         *    with in the fastpath code above, so there's no speed benefit
         *  + we would have to deal with the map returning NULL because the
         *    bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    memset(g2h(vaddr), 0, blocklen);
#endif
}