/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "cpu-features.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16
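/* (1,000,000,000 ns/s) / (16 ns/tick) = 62,500,000 ticks/s, i.e. 62.5MHz. */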

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
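
/*
 * For example (names here are illustrative, not definitions from this
 * header), the FIELD() definitions above are consumed via the
 * hw/registerfields.h helpers and their generated R_V7M_EXCRET_*_MASK
 * constants:
 *
 *     bool es = FIELD_EX32(excret, V7M_EXCRET, ES);
 *     uint32_t ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
 *
 * where "excret" holds an EXC_RETURN payload value.
 */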

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
#define VSTCR_SA VTCR_NSA

/* Bit definitions for CPACR (AArch32 only) */
FIELD(CPACR, CP10, 20, 2)
FIELD(CPACR, CP11, 22, 2)
FIELD(CPACR, TRCDIS, 28, 1)    /* matches CPACR_EL1.TTA */
FIELD(CPACR, D32DIS, 30, 1)    /* up to v7; RAZ in v8 */
FIELD(CPACR, ASEDIS, 31, 1)

/* Bit definitions for CPACR_EL1 (AArch64 only) */
FIELD(CPACR_EL1, ZEN, 16, 2)
FIELD(CPACR_EL1, FPEN, 20, 2)
FIELD(CPACR_EL1, SMEN, 24, 2)
FIELD(CPACR_EL1, TTA, 28, 1)   /* matches CPACR.TRCDIS */

/* Bit definitions for HCPTR (AArch32 only) */
FIELD(HCPTR, TCP10, 10, 1)
FIELD(HCPTR, TCP11, 11, 1)
FIELD(HCPTR, TASE, 15, 1)
FIELD(HCPTR, TTA, 20, 1)
FIELD(HCPTR, TAM, 30, 1)       /* matches CPTR_EL2.TAM */
FIELD(HCPTR, TCPAC, 31, 1)     /* matches CPTR_EL2.TCPAC */

/* Bit definitions for CPTR_EL2 (AArch64 only) */
FIELD(CPTR_EL2, TZ, 8, 1)      /* !E2H */
FIELD(CPTR_EL2, TFP, 10, 1)    /* !E2H, matches HCPTR.TCP10 */
FIELD(CPTR_EL2, TSM, 12, 1)    /* !E2H */
FIELD(CPTR_EL2, ZEN, 16, 2)    /* E2H */
FIELD(CPTR_EL2, FPEN, 20, 2)   /* E2H */
FIELD(CPTR_EL2, SMEN, 24, 2)   /* E2H */
FIELD(CPTR_EL2, TTA, 28, 1)
FIELD(CPTR_EL2, TAM, 30, 1)    /* matches HCPTR.TAM */
FIELD(CPTR_EL2, TCPAC, 31, 1)  /* matches HCPTR.TCPAC */

/* Bit definitions for CPTR_EL3 (AArch64 only) */
FIELD(CPTR_EL3, EZ, 8, 1)
FIELD(CPTR_EL3, TFP, 10, 1)
FIELD(CPTR_EL3, ESM, 12, 1)
FIELD(CPTR_EL3, TTA, 20, 1)
FIELD(CPTR_EL3, TAM, 30, 1)
FIELD(CPTR_EL3, TCPAC, 31, 1)

#define MDCR_MTPME    (1U << 28)
#define MDCR_TDCC     (1U << 27)
#define MDCR_HLP      (1U << 26)  /* MDCR_EL2 */
#define MDCR_SCCD     (1U << 23)  /* MDCR_EL3 */
#define MDCR_HCCD     (1U << 23)  /* MDCR_EL2 */
#define MDCR_EPMAD    (1U << 21)
#define MDCR_EDAD     (1U << 20)
#define MDCR_TTRF     (1U << 19)
#define MDCR_STE      (1U << 18)  /* MDCR_EL3 */
#define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
#define MDCR_HPMD     (1U << 17)  /* MDCR_EL2 */
#define MDCR_SDD      (1U << 16)
#define MDCR_SPD      (3U << 14)
#define MDCR_TDRA     (1U << 11)
#define MDCR_TDOSA    (1U << 10)
#define MDCR_TDA      (1U << 9)
#define MDCR_TDE      (1U << 8)
#define MDCR_HPME     (1U << 7)
#define MDCR_TPM      (1U << 6)
#define MDCR_TPMCR    (1U << 5)
#define MDCR_HPMN     (0x1fU)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
                         MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
                         MDCR_STE | MDCR_SPME | MDCR_SPD)

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

FIELD(VTCR, T0SZ, 0, 6)
FIELD(VTCR, SL0, 6, 2)
FIELD(VTCR, IRGN0, 8, 2)
FIELD(VTCR, ORGN0, 10, 2)
FIELD(VTCR, SH0, 12, 2)
FIELD(VTCR, TG0, 14, 2)
FIELD(VTCR, PS, 16, 3)
FIELD(VTCR, VS, 19, 1)
FIELD(VTCR, HA, 21, 1)
FIELD(VTCR, HD, 22, 1)
FIELD(VTCR, HWU59, 25, 1)
FIELD(VTCR, HWU60, 26, 1)
FIELD(VTCR, HWU61, 27, 1)
FIELD(VTCR, HWU62, 28, 1)
FIELD(VTCR, NSW, 29, 1)
FIELD(VTCR, NSA, 30, 1)
FIELD(VTCR, DS, 32, 1)
FIELD(VTCR, SL2, 33, 1)

#define HCRX_ENAS0    (1ULL << 0)
#define HCRX_ENALS    (1ULL << 1)
#define HCRX_ENASR    (1ULL << 2)
#define HCRX_FNXS     (1ULL << 3)
#define HCRX_FGTNXS   (1ULL << 4)
#define HCRX_SMPME    (1ULL << 5)
#define HCRX_TALLINT  (1ULL << 6)
#define HCRX_VINMI    (1ULL << 7)
#define HCRX_VFNMI    (1ULL << 8)
#define HCRX_CMOW     (1ULL << 9)
#define HCRX_MCE2     (1ULL << 10)
#define HCRX_MSCEN    (1ULL << 11)

#define HPFAR_NS      (1ULL << 63)

#define HSTR_TTEE (1 << 16)
#define HSTR_TJDBX (1 << 17)

/*
 * Depending on the value of HCR_EL2.E2H, bits 0 and 1
 * have different bit definitions, and EL1PCTEN might be
 * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
 * disambiguate if necessary.
 */
FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
FIELD(CNTHCTL, EVNTEN, 2, 1)
FIELD(CNTHCTL, EVNTDIR, 3, 1)
FIELD(CNTHCTL, EVNTI, 4, 4)
FIELD(CNTHCTL, EL0VTEN, 8, 1)
FIELD(CNTHCTL, EL0PTEN, 9, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
FIELD(CNTHCTL, EL1PTEN, 11, 1)
FIELD(CNTHCTL, ECV, 12, 1)
FIELD(CNTHCTL, EL1TVT, 13, 1)
FIELD(CNTHCTL, EL1TVCT, 14, 1)
FIELD(CNTHCTL, EL1NVPCT, 15, 1)
FIELD(CNTHCTL, EL1NVVCT, 16, 1)
FIELD(CNTHCTL, EVNTIS, 17, 1)
FIELD(CNTHCTL, CNTVMASK, 18, 1)
FIELD(CNTHCTL, CNTPMASK, 19, 1)

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);
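
/*
 * For example (illustrative only), a TCG helper that raises an UNDEFINED
 * exception can combine this with the syndrome helpers and the
 * exception_target_el() declared later in this header:
 *
 *     raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 *                     exception_target_el(env));
 *
 * raise_exception_ra() is the variant to use when the helper has a host
 * return address from GETPC() and needs CPU state restored first.
 */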

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}

void arm_cpu_register(const ARMCPUInfo *info);
void aarch64_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 *
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    target_ulong s2addr;
    target_ulong paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
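
/*
 * Worked example: a level-2 translation fault on domain 3 encodes as
 *
 *     ARMMMUFaultInfo fi = { .type = ARMFault_Translation,
 *                            .level = 2, .domain = 3 };
 *     arm_fi_to_sfsc(&fi);    // 0x7 | (3 << 4) == 0x37
 */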

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    }
    return mmu_idx | ARM_MMU_IDX_A;
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
      "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
      "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
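
/*
 * For example (illustrative only), debug logging can report the mode with:
 *
 *     qemu_log_mask(CPU_LOG_INT, "...in %s mode\n",
 *                   aarch32_mode_name(env->uncached_cpsr));
 */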

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality*/
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr: get the physical address for a virtual address
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));
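
/*
 * Illustrative call pattern (a sketch only; real callers do more):
 *
 *     GetPhysAddrResult res = {};
 *     ARMMMUFaultInfo fi = {};
 *     if (get_phys_addr(env, addr, MMU_DATA_LOAD, arm_mmu_idx(env),
 *                       &res, &fi)) {
 *         uint32_t fsr = arm_fi_to_lfsc(&fi);    // or arm_fi_to_sfsc()
 *     }
 */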

/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 *                                 address
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
                                    MMUAccessType access_type,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12)  /* size - 1 */
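
/*
 * Sketch of how a descriptor word for the mte_check* helpers can be
 * assembled from these fields (mmu_idx/is_write/size are illustrative
 * locals, not definitions from this header):
 *
 *     uint32_t desc = 0;
 *     desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
 *     desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
 *     desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
 */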

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);

/**
 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
 *                     operation going in the reverse direction
 * @ptr: *end* address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc);

/**
 * mte_check_fail: Record an MTE tag check failure
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 *
 * This may never return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);

/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}

/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}

/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
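
/*
 * For example, with PMCR.N == 4 the helpers above give
 * pmu_num_counters() == 4 and pmu_counter_mask() == (1ULL << 31) | 0xf:
 * the cycle counter bit plus one bit per implemented event counter.
 */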

#ifdef TARGET_AARCH64
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
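
/*
 * For example, with param.tsz == 25 and param.tbi set, bot_pac_bit is 39
 * and top_pac_bit is 56, so the returned mask covers bits [55:39].
 */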

/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
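/* That is, bits 0, 1, 3, 7 and 15: 0x808b. */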

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen for the EL0 is AArch32 case.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

void assert_hflags_rebuild_correctly(CPUARMState *env);

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);