/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
                                    uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0478C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;

    return cs;
}

void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit_restore(cs, ra);
}

static int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
     * to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}
uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
                          uint32_t maxindex)
{
    uint32_t val, shift;
    uint64_t *table = vn;

    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        uint32_t index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}
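/*
 * Illustrative note (not part of the original source): with a single
 * 64-bit table register, maxindex is 8 and each byte of ireg selects one
 * table byte. For example, ireg = 0x00000107 picks table byte 7 for lane 0
 * and table byte 1 for lane 1; an index byte of 0x20 (>= maxindex) instead
 * copies the corresponding byte of def, which the translator is expected
 * to pass as zero for VTBL or as the old destination for VTBX.
 */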
#if !defined(CONFIG_USER_ONLY)

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /* ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      false);
        /* Merge the runtime syndrome with the template syndrome.  */
        syn |= template_syn;
    }
    return syn;
}
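/*
 * Illustrative example (not part of the original source): a data abort
 * routed to EL1 always takes the no-ISS path above because target_el != 2,
 * even when the translation-time template had ISV set; only EL2-routed
 * aborts that are not stage-1 walk faults merge the template's
 * ISV/SAS/SSE/SRT/SF/AR fields into the runtime syndrome.
 */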
static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
                          int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
    }
    same_el = (arm_current_el(env) == target_el);

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /* LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /* Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}
/* try to fill the TLB and return an exception if error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c)
 */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    bool ret;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);

        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr, true);

        deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
    }
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.type = ARMFault_Alignment;
    deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

#endif /* !defined(CONFIG_USER_ONLY) */
void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        CPUState *cs = CPU(arm_env_get_cpu(env));

        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use cpu_restore_state() to
         * get them right before raising the exception.
         */
        cpu_restore_state(cs, GETPC(), true);
        raise_exception(env, EXCP_STKOF, 0, 1);
    }
}
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}
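/*
 * Worked example of the signed-overflow test used above (illustrative,
 * not part of the original source): for a = 0x7fffffff and b = 1,
 * res = 0x80000000. (res ^ a) has the sign bit set (result sign differs
 * from a) while (a ^ b) does not (operands agree in sign), so overflow is
 * flagged. The saturated value ~(((int32_t)a >> 31) ^ SIGNBIT) then yields
 * 0x7fffffff for positive a and 0x80000000 for negative a.
 */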
/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    int32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    int32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
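/*
 * Worked example (illustrative, not part of the original source):
 * do_ssat(env, 300, 7) computes top = 300 >> 7 = 2 and mask = 0x7f; since
 * top > 0 the value saturates to 127 and QF is set, i.e. the result is
 * clamped into the 8-bit signed range [-128, 127]. Likewise
 * do_usat(env, -5, 8) clamps to 0.
 */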
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}
/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
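/*
 * Illustrative example (not part of the original source): the checks above
 * apply in priority order EL1, then EL2, then EL3. A WFI executed at EL0
 * on a v8 CPU with SCTLR_EL1.nTWI clear returns 1 here; if nTWI is set but
 * HCR_EL2.TWI is set, the result is 2; otherwise SCR_EL3.TWI can still
 * route the trap to EL3.
 */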
void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= insn_len;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void QEMU_NORETURN HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void QEMU_NORETURN HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}
/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void QEMU_NORETURN HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void QEMU_NORETURN
HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/* Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env));
}
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();

    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();
}
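/*
 * Worked example (illustrative, not part of the original source): if the
 * new PC stored by generated code is 0x8003, the masking above yields
 * 0x8002 when returning to Thumb state (bit 0 cleared) and 0x8000 when
 * returning to ARM state (bits [1:0] cleared).
 */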
/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}
static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}
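/*
 * Illustrative example (not part of the original source): an MSR to
 * r13_usr executed from System mode takes the "case 13" path above
 * (tgtmode USR, curmode SYS) and UNDEFs, matching one of the
 * UNPREDICTABLE accesses that BankedRegisterAccessValid() rules out.
 */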
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}
void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}

void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     * -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     * -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     * -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     * - PSCI conduit is SMC but we don't have a valid PSCI call,
     * - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }
}
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}
static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void HELPER(check_breakpoints)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (check_breakpoints(cpu)) {
        HELPER(exception_internal)(env, EXCP_DEBUG);
    }
}
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /* Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}
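/*
 * Worked example (illustrative, not part of the original source): in BE32
 * mode a one-byte guest access to 0x1001 will have been adjusted to 0x1002
 * (address XOR 3) before reaching this point; XORing with 3 again recovers
 * 0x1001, so the watchpoint comparison sees the address the guest actually
 * used.
 */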
void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}
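/*
 * Worked example (illustrative, not part of the original source):
 * ror_cc(env, 0x80000001, 1) sets CF to bit 0 of the pre-rotate value
 * (here 1) and returns 0xc0000000. For i = 32 (shift1 != 0 but
 * shift == 0) the value is returned unchanged and CF becomes bit 31,
 * matching the ROR #32 behaviour of the ARM shifter.
 */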