/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */
#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7
static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}
/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16
/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)
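/*
 * The FIELD() macro (from hw/registerfields.h) also generates
 * R_V7M_CONTROL_<field>_MASK/_SHIFT/_LENGTH constants, so a caller can,
 * for example (illustrative snippet, variable names are made up):
 *
 *   bool spsel = FIELD_EX32(env->v7m.control[secure], V7M_CONTROL, SPSEL);
 *   control = FIELD_DP32(control, V7M_CONTROL, FPCA, 1);
 */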
/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe
/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */
/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el);
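/*
 * A typical call from a helper, shown purely as an illustration of the
 * expected arguments (the syndrome-encoding helpers live elsewhere in
 * target/arm/):
 *
 *   raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 *                   exception_target_el(env));
 */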
/*
 * Similarly, but also use unwinding to restore cpu state.
 */
void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp,
                                      uint32_t syndrome, uint32_t target_el,
                                      uintptr_t ra);
/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1.  */
        [2] = BANK_HYP, /* EL2.  */
        [3] = BANK_MON, /* EL3.  */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}
/* Map CPU modes onto saved register banks.  */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}
/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
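/*
 * So, for example, code saving the banked registers for an AArch32 mode
 * would (illustratively) index the arrays as:
 *
 *   env->banked_r13[bank_number(mode)]      = ...;
 *   env->banked_r14[r14_bank_number(mode)]  = ...;
 *   env->banked_spsr[bank_number(mode)]     = ...;
 */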
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */
enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

int arm_rmode_to_sf(int rmode);
static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}
static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}
static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
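/*
 * Sketch of the expected caller: the helper backing an "MSR SPSel, #imm"
 * write can simply forward the immediate here (illustrative; the exact
 * helper name may differ):
 *
 *   void HELPER(msr_i_spsel)(CPUARMState *env, uint32_t imm)
 *   {
 *       update_spsel(env, imm);
 *   }
 */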
/*
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
static inline unsigned int arm_pamax(ARMCPU *cpu)
{
    static const unsigned int pamax_map[] = {
        [0] = 32,
        [1] = 36,
        [2] = 40,
        [3] = 42,
        [4] = 44,
        [5] = 48,
    };
    unsigned int parange =
        FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

    /* id_aa64mmfr0 is a read-only register so values outside of the
     * supported mappings can be considered an implementation error.  */
    assert(parange < ARRAY_SIZE(pamax_map));
    return pamax_map[parange];
}
/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
}
/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);
/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);
#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif
/**
 * arm_clear_exclusive: clear the exclusive monitor
 *
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}
/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;
/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};
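/*
 * Illustrative usage: a page-table walker that detects a level-1
 * translation fault would fill in the struct and then convert it to
 * whichever format the current regime needs, along these lines:
 *
 *   ARMMMUFaultInfo fi = { .type = ARMFault_Translation, .level = 1 };
 *   uint32_t fsr = use_lpae ? arm_fi_to_lfsc(&fi) : arm_fi_to_sfsc(&fi);
 *
 * (use_lpae here stands in for whatever long-format/LPAE check applies.)
 */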
/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
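/*
 * As a worked example of the encoding above: a level-1 translation fault
 * in domain 3 yields fsc = 0x5 | (3 << 4) = 0x35 in this short format
 * (assuming the EA bit is clear).
 */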
/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        fsc = fi->level & 3;
        break;
    case ARMFault_AccessFlag:
        fsc = (fi->level & 3) | (0x2 << 2);
        break;
    case ARMFault_Permission:
        fsc = (fi->level & 3) | (0x3 << 2);
        break;
    case ARMFault_Translation:
        fsc = (fi->level & 3) | (0x1 << 2);
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = (fi->level & 3) | (0x7 << 2);
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}
static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    }
    return mmu_idx | ARM_MMU_IDX_A;
}
static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}
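/*
 * Sketch of how these conversions are typically paired (illustrative):
 * code that only has the small "core" index used by the generic TLB layer
 * can recover the full ARM index with core_to_arm_mmu_idx(), e.g.
 *
 *   ARMMMUIdx arm_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
 */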
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
/*
 * Return the MMU index for a v7M CPU with all relevant information
 * manually specified.
 */
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri);
/*
 * Return the MMU index for a v7M CPU in the specified security and
 * privilege state.
 */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv);
/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
/* Return true if the stage 1 translation regime is using LPAE format page
 * tables.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr);
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
/* Return true if this address translation regime has two ranges.  */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}
/* Return true if this address translation regime is secure */
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        return false;
    case ARMMMUIdx_SE3:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return true;
    default:
        g_assert_not_reached();
    }
}
static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_Stage1_SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return &env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
         * those are not currently used by QEMU, so just return VSTCR_EL2.
         */
        return &env->cp15.vstcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
/* Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}
/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}
/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}
/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}
/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}
/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}
/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}
/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
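/*
 * For example, aarch32_mode_name(0x13) (ARM_CPU_MODE_SVC) returns "svc",
 * and any reserved mode encoding maps to "???".
 */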
/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);
/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);
/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);
/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);
/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif
/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
        return true;
    default:
        return false;
    }
}
static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}
static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}
/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool using16k   : 1;
    bool using64k   : 1;
} ARMVAParameters;

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data);
static inline int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}
/* Determine if allocation tags are available.  */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}
#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs);
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));
void arm_log_exception(int idx);

#endif /* !CONFIG_USER_ONLY */
/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64 bits of tags.
 */
#define GMID_EL1_BS 6
/* We associate one allocation tag per 16 bytes, the minimum.  */
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE      (1 << LOG2_TAG_GRANULE)
/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)
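/*
 * A predicate descriptor is then assembled with the generic field helpers,
 * along these lines (illustrative; oprsz/esz are placeholder variables):
 *
 *   uint32_t desc = 0;
 *   desc = FIELD_DP32(desc, PREDDESC, OPRSZ, oprsz);
 *   desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);
 */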
/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5
/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9)  /* size - 1 */
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}
static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
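/*
 * These two helpers are inverses over the tag nibble: for any 4-bit rtag,
 * allocation_tag_from_addr(address_with_allocation_tag(ptr, rtag)) == rtag,
 * since both operate on bits [59:56] of the address.
 */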
/* Return true if tbi bits mean that the access is checked.  */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}
/* Return true if tcma bits mean that the access is unchecked.  */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
/*
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}
static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {