/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */
#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"
#include "cpu-features.h"
/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7
static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}
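
/*
 * Illustrative usage (a sketch, not code from this file): an exception
 * entry path can use this to decide whether the exception should be
 * logged and reported to the guest at all:
 *
 *     if (!excp_is_internal(cs->exception_index)) {
 *         arm_log_exception(cs);
 *     }
 */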
/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16
/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)
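
/*
 * FIELD() (from hw/registerfields.h) expands to shift/length/mask
 * constants such as R_V7M_CONTROL_SPSEL_MASK, for use with the
 * FIELD_EX32()/FIELD_DP32() helpers. An illustrative sketch:
 *
 *     bool spsel = FIELD_EX32(env->v7m.control[env->v7m.secure],
 *                             V7M_CONTROL, SPSEL);
 */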
/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe
/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)
/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */
/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);
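
/*
 * Illustrative usage (a sketch; syn_uncategorized() is from syndrome.h):
 * a TCG helper that must deliver an UNDEF to the guest might do
 *
 *     raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 *                     exception_target_el(env));
 */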
/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}
/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}
/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
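
/*
 * Illustrative sketch (not the real mode-switch code, which lives in
 * helper.c): saving the banked R13/R14 when leaving a mode uses the two
 * different index functions:
 *
 *     env->banked_r13[bank_number(mode)] = env->regs[13];
 *     env->banked_r14[r14_bank_number(mode)] = env->regs[14];
 */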
void arm_cpu_register(const ARMCPUInfo *info);
void aarch64_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */
typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}
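
/*
 * Illustrative sketch: converting an FPCR.RMode-style rounding mode for
 * use with softfloat (set_float_rounding_mode() is the softfloat API):
 *
 *     set_float_rounding_mode(arm_rmode_to_sf(FPROUNDING_ZERO),
 *                             &env->vfp.fp_status);
 */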
static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}
static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
/*
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);
/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}
/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);
#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif
/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}
/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;
typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;
/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    target_ulong s2addr;
    target_ulong paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};
/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
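
/*
 * Illustrative sketch: encoding a level 1 translation fault in short
 * format with the values above:
 *
 *     ARMMMUFaultInfo fi = { .type = ARMFault_Translation, .level = 1 };
 *     uint32_t fsr = arm_fi_to_sfsc(&fi);  // 0x5, domain 0
 */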
/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9; /* LPAE */
    return fsc;
}
static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}
#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif
static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
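
/*
 * Illustrative sketch: the core (TCG) MMU index round-trips through
 * these helpers on an A-profile CPU:
 *
 *     int core = arm_to_core_mmu_idx(ARMMMUIdx_E10_1);
 *     ARMMMUIdx idx = core_to_arm_mmu_idx(env, core);  // ARMMMUIdx_E10_1
 */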
/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif
/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}
static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}
static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}
/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)
/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
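
/*
 * Illustrative sketch (assumes the VTCR field definitions from cpu.h):
 * walk code can read the controlling TCR without special-casing Secure
 * vs Non-Secure stage 2, then pick fields out of the synthesized
 * VTCR_EL2-format value:
 *
 *     uint64_t tcr = regime_tcr(env, ARMMMUIdx_Stage2_S);
 *     int sh0 = FIELD_EX64(tcr, VTCR, SH0);
 */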
/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}
/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}
/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}
/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}
/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}
/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);
/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);
/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif
/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}
static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}
static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}
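
/*
 * Illustrative sketch: a value written to SPSR_EL1 can be masked so
 * that PSTATE bits reserved on this CPU read back as zero:
 *
 *     val &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar);
 */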
/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran4K:
        return 12;
    case Gran16K:
        return 14;
    case Gran64K:
        return 16;
    default:
        g_assert_not_reached();
    }
}
/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;
/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU context
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *               (ignored if @mmu_idx is for a stage 1 regime; only affects
 *               tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);
/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}
#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;
/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));
/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 *                                 address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
                                    MMUAccessType access_type,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */
/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)
/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5
/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - 12) /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);
/**
 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
 *                     operation going in the reverse direction
 * @env: CPU env
 * @ptr: *end* address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc);
/**
 * mte_check_fail: Record an MTE tag check failure
 * @env: CPU env
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 * @ra: TCG retaddr
 *
 * This may never return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);
/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @env: CPU env
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);
static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
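
/*
 * Illustrative sketch: the logical allocation tag lives in bits [59:56]
 * of a pointer, so these two helpers round-trip:
 *
 *     int tag = allocation_tag_from_addr(ptr);
 *     uint64_t same = address_with_allocation_tag(ptr, tag);  // == ptr
 */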
/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}
/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
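
/*
 * Illustrative sketch (assuming the cp15.c9_pmcnten state field): a
 * PMCNTENCLR write handler can mask the guest value with the counter
 * mask, bit 31 being the cycle counter:
 *
 *     env->cp15.c9_pmcnten &= ~(value & pmu_counter_mask(env));
 */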
#ifdef TARGET_AARCH64
int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif
/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);
bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);
/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
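
/*
 * Illustrative sketch: with param.tsz == 16 and param.tbi == 1 the PAC
 * occupies bits [55:48], so the returned mask is
 *
 *     MAKE_64BIT_MASK(48, 8);  // bot = 64 - 16, top = 64 - 8
 */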
/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}
/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen for the EL0 is AArch32 case.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}
void assert_hflags_rebuild_correctly(CPUARMState *env);
/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;
/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);

#endif /* TARGET_ARM_INTERNALS_H */