/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7
static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}
/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16
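/*
 * For reference, the frequency quoted above follows directly from the
 * scale factor: 1000000000 ns/s divided by 16 ns/tick is 62500000
 * ticks/s, i.e. 62.5MHz.
 */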
/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)
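/*
 * Illustrative use (a sketch, not part of the original header): each
 * FIELD() above generates R_V7M_CONTROL_<NAME>_MASK/_SHIFT constants plus
 * extract/deposit helpers, so callers typically do something like:
 *
 *     bool spsel = FIELD_EX32(env->v7m.control[secure], V7M_CONTROL, SPSEL);
 *     control = FIELD_DP32(control, V7M_CONTROL, FPCA, 1);
 */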
/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe
/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */
/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el);
/*
 * Similarly, but also use unwinding to restore cpu state.
 */
void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp,
                                      uint32_t syndrome, uint32_t target_el,
                                      uintptr_t ra);
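/*
 * Illustrative call (a sketch only; the syndrome value and target EL
 * shown here are assumptions for the example, not taken from this header):
 *
 *     raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 *                     exception_target_el(env));
 */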
/* For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}
/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}
/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */
/**
 * aarch64_sve_zcr_get_valid_len:
 * @cpu: cpu context
 * @start_len: maximum len to consider
 *
 * Return the maximum supported sve vector length <= @start_len.
 * Note that both @start_len and the return value are in units
 * of ZCR_ELx.LEN, so the vector bit length is (x + 1) * 128.
 */
uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len);
enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

int arm_rmode_to_sf(int rmode);
static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}
static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}
static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
/*
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
static inline unsigned int arm_pamax(ARMCPU *cpu)
{
    static const unsigned int pamax_map[] = {
        [0] = 32,
        [1] = 36,
        [2] = 40,
        [3] = 42,
        [4] = 44,
        [5] = 48,
    };
    unsigned int parange =
        FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

    /* id_aa64mmfr0 is a read-only register so values outside of the
     * supported mappings can be considered an implementation error.
     */
    assert(parange < ARRAY_SIZE(pamax_map));
    return pamax_map[parange];
}
/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
}
/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);
#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif
/**
 * arm_clear_exclusive: clear the exclusive monitor
 *
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}
/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;
/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};
/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        fsc = fi->level & 3;
        break;
    case ARMFault_AccessFlag:
        fsc = (fi->level & 3) | (0x2 << 2);
        break;
    case ARMFault_Permission:
        fsc = (fi->level & 3) | (0x3 << 2);
        break;
    case ARMFault_Translation:
        fsc = (fi->level & 3) | (0x1 << 2);
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = (fi->level & 3) | (0x7 << 2);
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
/*
 * Return the MMU index for a v7M CPU with all relevant information
 * manually specified.
 */
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri);

/*
 * Return the MMU index for a v7M CPU in the specified security and
 * privilege state.
 */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
/* Return true if the stage 1 translation regime is using LPAE format page
 * tables.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr);

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}
/* Return true if this address translation regime is secure */
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        return false;
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return true;
    default:
        g_assert_not_reached();
    }
}
static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
        return 2;
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_Stage1_SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return &env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
         * those are not currently used by QEMU, so just return VSTCR_EL2.
         */
        return &env->cp15.vstcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
/* Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}
/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}
/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}
/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}
/*
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}
/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
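/*
 * Example (illustrative values only): a PSR of 0x600001d3 has mode bits
 * M[3:0] = 0x3, so aarch32_mode_name() returns "svc".
 */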
/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);
/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif
/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
        return true;
    default:
        return false;
    }
}
static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality*/
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}
static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}
/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool using16k   : 1;
    bool using64k   : 1;
} ARMVAParameters;

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data);
static inline int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}
/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}
#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t irregion;
    bool irvalid;
} V8M_SAttributes;
void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs);

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));

void arm_log_exception(int idx);
#endif /* !CONFIG_USER_ONLY */
/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64-bits of tags.
 */
#define GMID_EL1_BS  6

/* We associate one allocation tag per 16 bytes, the minimum. */
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE      (1 << LOG2_TAG_GRANULE)
/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)
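/*
 * Illustrative decode (a sketch, not from the original header): a helper
 * receiving such a descriptor recovers the fields with the generated
 * extract macros, e.g.
 *
 *     intptr_t oprsz = FIELD_EX32(desc, PREDDESC, OPRSZ);
 *     intptr_t esz = FIELD_EX32(desc, PREDDESC, ESZ);
 */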
/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9)  /* size - 1 */
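/*
 * Illustrative composition (a sketch; the variable names are assumptions,
 * not taken from this header): a descriptor is typically built with the
 * generated deposit macros before being passed to a helper, e.g.
 *
 *     desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
 *     desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
 *     desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
 */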
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
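/*
 * Round-trip example (illustrative values only): for
 * ptr = 0x0a00ffffdead0000, allocation_tag_from_addr(ptr) yields 0xa,
 * and address_with_allocation_tag(ptr, 0x3) yields 0x0300ffffdead0000.
 */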
/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}
/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
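/*
 * Worked example of the fold above (illustrative): if ptr<59:56> is 0b1111
 * and bit55 is 1, then (0xf + 1) & 0xf == 0, matching ptr<59:55> == 11111;
 * if both are zero, (0 + 0) & 0xf == 0, matching ptr<59:55> == 00000.
 * Any other combination yields a non-zero value and does not match.
 */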
/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}
static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}
/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};