/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */
#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7
static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}
/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16
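
/*
 * Worked check (not in the original header): QEMU's virtual clock counts
 * nanoseconds, so 16 ns per tick gives 1e9 / 16 = 62.5e6 ticks per second,
 * i.e. the 62.5MHz noted above.
 */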
/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)
/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
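
/*
 * Illustrative sketch (not in the original header): FIELD() from
 * hw/registerfields.h generates R_V7M_EXCRET_<field>_SHIFT/_LENGTH/_MASK
 * constants, so payload bits can be read and written like this:
 *
 *     uint32_t excret = 0xfffffff9;
 *     bool spsel = FIELD_EX32(excret, V7M_EXCRET, SPSEL);
 *     excret = FIELD_DP32(excret, V7M_EXCRET, SPSEL, 1);
 */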
/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe
/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 *
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */
/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el);
/*
 * Similarly, but also use unwinding to restore cpu state.
 */
void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp,
                                      uint32_t syndrome, uint32_t target_el,
                                      uintptr_t ra);
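
/*
 * Illustrative sketch (not in the original header): a TCG helper reports
 * a guest fault by calling raise_exception() and never returning; the
 * helper name and syndrome choice here are hypothetical.
 *
 *     void HELPER(example_trap)(CPUARMState *env)
 *     {
 *         raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 *                         exception_target_el(env));
 *     }
 */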
/* For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}
/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}
/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
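
/*
 * Illustrative sketch (not in the original header): a mode switch swaps
 * banked registers using the two mappings; old_mode and new_mode are
 * hypothetical locals.
 *
 *     env->banked_r13[bank_number(old_mode)] = env->regs[13];
 *     env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
 *     env->regs[13] = env->banked_r13[bank_number(new_mode)];
 *     env->regs[14] = env->banked_r14[r14_bank_number(new_mode)];
 */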
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */
enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

int arm_rmode_to_sf(int rmode);
static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}
static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}
static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
/* Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
static inline unsigned int arm_pamax(ARMCPU *cpu)
{
    static const unsigned int pamax_map[] = {
        [0] = 32,
        [1] = 36,
        [2] = 40,
        [3] = 42,
        [4] = 44,
        [5] = 48,
    };
    unsigned int parange =
        FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

    /* id_aa64mmfr0 is a read-only register so values outside of the
     * supported mappings can be considered an implementation error. */
    assert(parange < ARRAY_SIZE(pamax_map));
    return pamax_map[parange];
}
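
/*
 * Worked example (not in the original header): a CPU whose
 * ID_AA64MMFR0_EL1.PARange field reads 5 gets arm_pamax() == 48,
 * i.e. 48-bit physical addresses.
 */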
/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
}
/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);
#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif
/**
 * arm_clear_exclusive: clear the exclusive monitor
 *
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}
/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;
/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};
/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
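
/*
 * Worked example (not in the original header): a level 1 translation
 * fault in domain 3 encodes as FSC 0x5 with the domain in bits [7:4]:
 *
 *     ARMMMUFaultInfo fi = { .type = ARMFault_Translation, .level = 1,
 *                            .domain = 3 };
 *     uint32_t fsr = arm_fi_to_sfsc(&fi);   // == 0x35
 */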
/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        fsc = fi->level & 3;
        break;
    case ARMFault_AccessFlag:
        fsc = (fi->level & 3) | (0x2 << 2);
        break;
    case ARMFault_Permission:
        fsc = (fi->level & 3) | (0x3 << 2);
        break;
    case ARMFault_Translation:
        fsc = (fi->level & 3) | (0x1 << 2);
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = (fi->level & 3) | (0x7 << 2);
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
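
/*
 * Worked example (not in the original header): a level 2 translation
 * fault encodes as FSC 0b000110 plus the LPAE bit:
 *
 *     ARMMMUFaultInfo fi = { .type = ARMFault_Translation, .level = 2 };
 *     uint32_t fsr = arm_fi_to_lfsc(&fi);   // == 0x206
 */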
static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
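
/*
 * Illustrative sketch (not in the original header): the core index is
 * just the ARMMMUIdx with its profile tag stripped, so the conversions
 * round-trip on an A-profile CPU:
 *
 *     int core = arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
 *     ARMMMUIdx idx = core_to_arm_mmu_idx(env, core);  // == ARMMMUIdx_E10_0
 */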
/*
 * Return the MMU index for a v7M CPU with all relevant information
 * manually specified.
 */
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri);

/*
 * Return the MMU index for a v7M CPU in the specified security and
 * privilege state.
 */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
/* Return true if the stage 1 translation regime is using LPAE format page
 * tables.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr);
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}
/* Return true if this address translation regime is secure */
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        return false;
    case ARMMMUIdx_SE3:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return true;
    default:
        g_assert_not_reached();
    }
}
static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_Stage1_SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return &env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
         * those are not currently used by QEMU, so just return VSTCR_EL2.
         */
        return &env->cp15.vstcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
/* Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}
/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}
/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}
/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}
/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}
/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
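
/*
 * Illustrative sketch (not in the original header): debug logging uses
 * this roughly as follows:
 *
 *     qemu_log_mask(CPU_LOG_INT, "taking exception in %s mode\n",
 *                   aarch32_mode_name(env->uncached_cpsr));
 */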
/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);
/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);
/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif
/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
        return true;
    default:
        return false;
    }
}
static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}
static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}
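
/*
 * Illustrative sketch (not in the original header): exception-return code
 * can use the mask to reject SPSR bits the CPU does not implement
 * (spsr is a hypothetical local):
 *
 *     uint32_t psr = spsr & aarch64_pstate_valid_mask(&cpu->isar);
 */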
/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool using16k   : 1;
    bool using64k   : 1;
} ARMVAParameters;

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data);
static inline int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}
/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}
#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs);
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));
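
/*
 * Illustrative sketch (not in the original header): a caller such as a
 * debug-access path walks the current regime roughly like this (all
 * locals hypothetical); note that get_phys_addr() returns true on a
 * fault, filling in *fi:
 *
 *     hwaddr phys;
 *     target_ulong page_size;
 *     int prot;
 *     MemTxAttrs attrs = {};
 *     ARMMMUFaultInfo fi = {};
 *     ARMCacheAttrs cacheattrs = {};
 *
 *     if (!get_phys_addr(env, addr, MMU_DATA_LOAD, arm_mmu_idx(env),
 *                        &phys, &attrs, &prot, &page_size,
 *                        &fi, &cacheattrs)) {
 *         // translation succeeded; phys is valid
 *     }
 */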
void arm_log_exception(int idx);

#endif /* !CONFIG_USER_ONLY */
/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64-bits of tags.
 */
#define GMID_EL1_BS 6

/* We associate one allocation tag per 16 bytes, the minimum. */
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE      (1 << LOG2_TAG_GRANULE)
/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)
/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5
/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ESIZE, 9, 5)
FIELD(MTEDESC, TSIZE, 14, 10)  /* mte_checkN only */
bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check1(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra);
uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra);
static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
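
/*
 * Illustrative sketch (not in the original header): the logical tag lives
 * in bits [59:56], so the two helpers round-trip:
 *
 *     uint64_t tagged = address_with_allocation_tag(ptr, 0xa);
 *     int tag = allocation_tag_from_addr(tagged);   // == 0xa
 */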
/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
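
/*
 * Worked check (not in the original header) of the fold above: when bit55
 * is 1 the tag must be 0xf, and (0xf + 1) & 0xf == 0; when bit55 is 0 the
 * tag must be 0x0, and (0x0 + 0) & 0xf == 0. Any other tag leaves a
 * nonzero remainder, so match is false.
 */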
/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

#endif /* TARGET_ARM_INTERNALS_H */