/*
 * ARM page table walking.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/range.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "internals.h"
#include "cpu-features.h"
#ifdef CONFIG_TCG
# include "tcg/oversized-guest.h"
#endif
typedef struct S1Translate {
    /*
     * in_mmu_idx: specifies which TTBR, TCR, etc to use for the walk.
     * Together with in_space, specifies the architectural translation regime.
     */
    ARMMMUIdx in_mmu_idx;
    /*
     * in_ptw_idx: specifies which mmuidx to use for the actual
     * page table descriptor load operations. This will be one of the
     * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes.
     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
     * this field is updated accordingly.
     */
    ARMMMUIdx in_ptw_idx;
    /*
     * in_space: the security space for this walk. This plus
     * the in_mmu_idx specify the architectural translation regime.
     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
     * this field is updated accordingly.
     *
     * Note that the security space for the in_ptw_idx may be different
     * from that for the in_mmu_idx. We do not need to explicitly track
     * the in_ptw_idx security space because:
     *  - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx
     *    itself specifies the security space
     *  - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security
     *    space used for ptw reads is the same as that of the security
     *    space of the stage 1 translation for all cases except where
     *    stage 1 is Secure; in that case the only possibilities for
     *    the ptw read are Secure and NonSecure, and the in_ptw_idx
     *    value being Stage2 vs Stage2_S distinguishes those.
     */
    ARMSecuritySpace in_space;
    /*
     * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
     * accesses will not update the guest page table access flags
     * and will not change the state of the softmmu TLBs.
     */
    bool in_debug;
    /*
     * If this is stage 2 of a stage 1+2 page table walk, then this must
     * be true if stage 1 is an EL0 access; otherwise this is ignored.
     * Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}.
     */
    bool in_s1_is_el0;
    bool out_rw;
    bool out_be;
    ARMSecuritySpace out_space;
    hwaddr out_virt;
    hwaddr out_phys;
    void *out_host;
} S1Translate;
static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
                                vaddr address,
                                MMUAccessType access_type,
                                GetPhysAddrResult *result,
                                ARMMMUFaultInfo *fi);

static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
                              vaddr address,
                              MMUAccessType access_type,
                              GetPhysAddrResult *result,
                              ARMMMUFaultInfo *fi);
/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
static const uint8_t pamax_map[] = {
    [0] = 32,
    [1] = 36,
    [2] = 40,
    [3] = 42,
    [4] = 44,
    [5] = 48,
    [6] = 52,
};

/*
 * The cpu-specific constant value of PAMax; also used by hw/arm/virt.
 * Note that machvirt_init calls this on a CPU that is inited but not realized!
 */
unsigned int arm_pamax(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        unsigned int parange =
            FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

        /*
         * id_aa64mmfr0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        assert(parange < ARRAY_SIZE(pamax_map));
        return pamax_map[parange];
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        /* v7 or v8 with LPAE */
        return 40;
    }

    /* Anything else */
    return 32;
}
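/*
 * Example: a CPU whose ID_AA64MMFR0.PARANGE field reads as 0b0101 implements
 * a 48-bit physical address space, so arm_pamax() returns pamax_map[5] == 48.
 */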
/*
 * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
 */
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        return mmu_idx;
    }
}
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
/*
 * Return where we should do ptw loads from for a stage 2 walk.
 * This depends on whether the address we are looking up is a
 * Secure IPA or a NonSecure IPA, which we know from whether this is
 * Stage2 or Stage2_S.
 * If this is the Secure EL1&0 regime we need to check the NSW and SW bits.
 */
static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx)
{
    bool s2walk_secure;

    /*
     * We're OK to check the current state of the CPU here because
     * (1) we always invalidate all TLBs when the SCR_EL3.NS or SCR_EL3.NSE
     * bit changes;
     * (2) there's no way to do a lookup that cares about Stage 2 for a
     * different security state to the current one for AArch64, and AArch32
     * never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do
     * an NS stage 1+2 lookup while the NS bit is 0.)
     */
    if (!arm_el_is_aa64(env, 3)) {
        return ARMMMUIdx_Phys_NS;
    }

    switch (arm_security_space_below_el3(env)) {
    case ARMSS_NonSecure:
        return ARMMMUIdx_Phys_NS;
    case ARMSS_Realm:
        return ARMMMUIdx_Phys_Realm;
    case ARMSS_Secure:
        if (stage2idx == ARMMMUIdx_Stage2_S) {
            s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
        } else {
            s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
        }
        return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
    default:
        g_assert_not_reached();
    }
}
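/*
 * For the Secure EL1&0 regime the hypervisor chooses which physical address
 * space the stage 2 walk itself uses: VSTCR_EL2.SW moves walks for the
 * Secure IPA space into the NonSecure PA space when set, and VTCR_EL2.NSW
 * does the same for the NonSecure IPA space; with both bits clear the
 * descriptor fetches stay in the Secure PA space, which is what the
 * s2walk_secure logic above implements.
 */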
static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}
/* Return the TTBR associated with this translation regime */
static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        return env->cp15.vsttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}
/* Return true if the specified stage of address translation is disabled */
static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
                                        ARMSecuritySpace space)
{
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_M)) {
        bool is_secure = arm_space_is_secure(space);
        switch (env->v7m.mpu_ctrl[is_secure] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        default:
            /*
             * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    switch (mmu_idx) {
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
        /* HCR.DC means HCR.VM behaves as 1 */
        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;

    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
        if (hcr_el2 & HCR_TGE) {
            return true;
        }
        break;

    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
        if (hcr_el2 & HCR_DC) {
            return true;
        }
        break;

    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        break;

    case ARMMMUIdx_Phys_S:
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_Phys_Root:
    case ARMMMUIdx_Phys_Realm:
        /* No translation for physical address spaces. */
        return true;

    default:
        g_assert_not_reached();
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
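/*
 * Example: for ARMMMUIdx_Stage2 the function returns true (stage 2 disabled)
 * whenever the effective HCR_EL2.VM and HCR_EL2.DC are both zero, independent
 * of the stage 1 SCTLR.M setting, because stage 2 only exists once the
 * hypervisor has enabled it.
 */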
static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
                                     ARMSecuritySpace pspace,
                                     ARMMMUFaultInfo *fi)
{
    MemTxAttrs attrs = {
        .secure = true,
        .space = ARMSS_Root,
    };
    ARMCPU *cpu = env_archcpu(env);
    uint64_t gpccr = env->cp15.gpccr_el3;
    unsigned pps, pgs, l0gptsz, level = 0;
    uint64_t tableaddr, pps_mask, align, entry, index;
    AddressSpace *as;
    MemTxResult result;
    int gpi;

    if (!FIELD_EX64(gpccr, GPCCR, GPC)) {
        return true;
    }

    /*
     * GPC Priority 1 (R_GMGRR):
     * R_JWCSM: If the configuration of GPCCR_EL3 is invalid,
     * the access fails as GPT walk fault at level 0.
     */

    /*
     * Configuration of PPS to a value exceeding the implemented
     * physical address size is invalid.
     */
    pps = FIELD_EX64(gpccr, GPCCR, PPS);
    if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) {
        goto fault_walk;
    }
    pps = pamax_map[pps];
    pps_mask = MAKE_64BIT_MASK(0, pps);

    switch (FIELD_EX64(gpccr, GPCCR, SH)) {
    case 0b10: /* outer shareable */
        break;
    case 0b00: /* non-shareable */
    case 0b11: /* inner shareable */
        /* Inner and Outer non-cacheable requires Outer shareable. */
        if (FIELD_EX64(gpccr, GPCCR, ORGN) == 0 &&
            FIELD_EX64(gpccr, GPCCR, IRGN) == 0) {
            goto fault_walk;
        }
        break;
    default:   /* reserved */
        goto fault_walk;
    }

    switch (FIELD_EX64(gpccr, GPCCR, PGS)) {
    case 0b00: /* 4KB */
        pgs = 12;
        break;
    case 0b01: /* 64KB */
        pgs = 16;
        break;
    case 0b10: /* 16KB */
        pgs = 14;
        break;
    default: /* reserved */
        goto fault_walk;
    }

    /* Note this field is read-only and fixed at reset. */
    l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ);

    /*
     * GPC Priority 2: Secure, Realm or Root address exceeds PPS.
     * R_CPDSB: A NonSecure physical address input exceeding PPS
     * does not experience any fault.
     */
    if (paddress & ~pps_mask) {
        if (pspace == ARMSS_NonSecure) {
            return true;
        }
        goto fault_size;
    }

    /* GPC Priority 3: the base address of GPTBR_EL3 exceeds PPS. */
    tableaddr = env->cp15.gptbr_el3 << 12;
    if (tableaddr & ~pps_mask) {
        goto fault_size;
    }

    /*
     * BADDR is aligned per a function of PPS and L0GPTSZ.
     * These bits of GPTBR_EL3 are RES0, but are not a configuration error,
     * unlike the RES0 bits of the GPT entries (R_XNKFZ).
     */
    align = MAX(pps - l0gptsz + 3, 12);
    align = MAKE_64BIT_MASK(0, align);
    tableaddr &= ~align;

    as = arm_addressspace(env_cpu(env), attrs);

    /* Level 0 lookup. */
    index = extract64(paddress, l0gptsz, pps - l0gptsz);
    tableaddr += index * 8;
    entry = address_space_ldq_le(as, tableaddr, attrs, &result);
    if (result != MEMTX_OK) {
        goto fault_eabt;
    }

    switch (extract32(entry, 0, 4)) {
    case 1: /* block descriptor */
        if (entry >> 8) {
            goto fault_walk; /* RES0 bits not 0 */
        }
        gpi = extract32(entry, 4, 4);
        goto found;
    case 3: /* table descriptor */
        tableaddr = entry & ~0xf;
        align = MAX(l0gptsz - pgs - 1, 12);
        align = MAKE_64BIT_MASK(0, align);
        if (tableaddr & (~pps_mask | align)) {
            goto fault_walk; /* RES0 bits not 0 */
        }
        break;
    default: /* invalid */
        goto fault_walk;
    }

    /* Level 1 lookup. */
    level = 1;
    index = extract64(paddress, pgs + 4, l0gptsz - pgs - 4);
    tableaddr += index * 8;
    entry = address_space_ldq_le(as, tableaddr, attrs, &result);
    if (result != MEMTX_OK) {
        goto fault_eabt;
    }

    switch (extract32(entry, 0, 4)) {
    case 1: /* contiguous descriptor */
        if (entry >> 10) {
            goto fault_walk; /* RES0 bits not 0 */
        }
        /*
         * Because the softmmu tlb only works on units of TARGET_PAGE_SIZE,
         * and because we cannot invalidate by pa, and thus will always
         * flush entire tlbs, we don't actually care about the range here
         * and can simply extract the GPI as the result.
         */
        if (extract32(entry, 8, 2) == 0) {
            goto fault_walk; /* reserved contig */
        }
        gpi = extract32(entry, 4, 4);
        break;
    default:
        /* Interleaved granules descriptor: 16 4-bit GPI fields. */
        index = extract64(paddress, pgs, 4);
        gpi = extract64(entry, index * 4, 4);
        break;
    }

 found:
    switch (gpi) {
    case 0b0000: /* no access */
        break;
    case 0b1111: /* all access */
        return true;
    case 0b1000:
    case 0b1001:
    case 0b1010:
    case 0b1011:
        if (pspace == (gpi & 3)) {
            return true;
        }
        break;
    default:
        goto fault_walk; /* reserved */
    }

    fi->gpcf = GPCF_Fail;
    goto fault_common;
 fault_eabt:
    fi->gpcf = GPCF_EABT;
    goto fault_common;
 fault_size:
    fi->gpcf = GPCF_AddressSize;
    goto fault_common;
 fault_walk:
    fi->gpcf = GPCF_Walk;
 fault_common:
    fi->level = level;
    fi->paddr = paddress;
    fi->paddr_space = pspace;
    return false;
}
static bool S1_attrs_are_device(uint8_t attrs)
{
    /*
     * This slightly under-decodes the MAIR_ELx field:
     * 0b0000dd01 is Device with FEAT_XS, otherwise UNPREDICTABLE;
     * 0b0000dd1x is UNPREDICTABLE.
     */
    return (attrs & 0xf0) == 0;
}
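/*
 * Example: MAIR attributes 0b00000000 (Device-nGnRnE) and 0b00000100
 * (Device-nGnRE) both have a zero upper nibble and are treated as Device,
 * whereas 0b11111111 (Normal, Write-Back cacheable) is not.
 */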
static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
{
    /*
     * For an S1 page table walk, the stage 1 attributes are always
     * some form of "this is Normal memory". The combined S1+S2
     * attributes are therefore only Device if stage 2 specifies Device.
     * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
     * ie when cacheattrs.attrs bits [3:2] are 0b00.
     * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
     * when cacheattrs.attrs bit [2] is 0.
     */
    if (hcr & HCR_FWB) {
        return (attrs & 0x4) == 0;
    } else {
        return (attrs & 0xc) == 0;
    }
}
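/*
 * Example: with FWB disabled, stage 2 attrs 0b0001 (Device-nGnRE) has
 * bits [3:2] == 0b00 and so is Device, while 0b1111 (Normal, Write-Back)
 * is not; with FWB enabled only bit [2] is decoded.
 */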
static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space,
                                          ARMMMUIdx s2_mmu_idx)
{
    /*
     * Return the security space to use for stage 2 when doing
     * the S1 page table descriptor load.
     */
    if (regime_is_stage2(s2_mmu_idx)) {
        /*
         * The security space for ptw reads is almost always the same
         * as that of the security space of the stage 1 translation.
         * The only exception is when stage 1 is Secure; in that case
         * the ptw read might be to the Secure or the NonSecure space
         * (but never Realm or Root), and the s2_mmu_idx tells us which.
         * Root translations are always single-stage.
         */
        if (s1_space == ARMSS_Secure) {
            return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S);
        } else {
            assert(s2_mmu_idx != ARMMMUIdx_Stage2_S);
            assert(s1_space != ARMSS_Root);
            return s1_space;
        }
    } else {
        /* ptw loads are from phys: the mmu idx itself says which space */
        return arm_phys_to_space(s2_mmu_idx);
    }
}
static bool fault_s1ns(ARMSecuritySpace space, ARMMMUIdx s2_mmu_idx)
{
    /*
     * For stage 2 faults in Secure EL2, S1NS indicates
     * whether the faulting IPA is in the Secure or NonSecure
     * IPA space. For all other kinds of fault, it is false.
     */
    return space == ARMSS_Secure && regime_is_stage2(s2_mmu_idx)
        && s2_mmu_idx == ARMMMUIdx_Stage2_S;
}
/* Translate a S1 pagetable walk through S2 if needed. */
static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                             hwaddr addr, ARMMMUFaultInfo *fi)
{
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
    uint8_t pte_attrs;

    ptw->out_virt = addr;

    if (unlikely(ptw->in_debug)) {
        /*
         * From gdbstub, do not use softmmu so that we don't modify the
         * state of the cpu at all, including softmmu tlb contents.
         */
        ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx);
        S1Translate s2ptw = {
            .in_mmu_idx = s2_mmu_idx,
            .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
            .in_space = s2_space,
            .in_debug = true,
        };
        GetPhysAddrResult s2 = { };

        if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, &s2, fi)) {
            goto fail;
        }

        ptw->out_phys = s2.f.phys_addr;
        pte_attrs = s2.cacheattrs.attrs;
        ptw->out_host = NULL;
        ptw->out_space = s2.f.attrs.space;
    } else {
#ifdef CONFIG_TCG
        CPUTLBEntryFull *full;
        int flags;

        flags = probe_access_full_mmu(env, addr, 0, MMU_DATA_LOAD,
                                      arm_to_core_mmu_idx(s2_mmu_idx),
                                      &ptw->out_host, &full);
        if (unlikely(flags & TLB_INVALID_MASK)) {
            goto fail;
        }
        ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
        ptw->out_rw = full->prot & PAGE_WRITE;
        pte_attrs = full->extra.arm.pte_attrs;
        ptw->out_space = full->attrs.space;
#else
        g_assert_not_reached();
#endif
    }

    if (regime_is_stage2(s2_mmu_idx)) {
        uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);

        if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
            /*
             * PTW set and S1 walk touched S2 Device memory:
             * generate Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
            return false;
        }
    }

    ptw->out_be = regime_translation_big_endian(env, mmu_idx);
    return true;

 fail:
    assert(fi->type != ARMFault_None);
    if (fi->type == ARMFault_GPCFOnOutput) {
        fi->type = ARMFault_GPCFOnWalk;
    }
    fi->s2addr = addr;
    fi->stage2 = regime_is_stage2(s2_mmu_idx);
    fi->s1ptw = fi->stage2;
    fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
    return false;
}
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw,
                            ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    void *host = ptw->out_host;
    uint32_t data;

    if (likely(host)) {
        /* Page tables are in RAM, and we have the host address. */
        data = qatomic_read((uint32_t *)host);
        if (ptw->out_be) {
            data = be32_to_cpu(data);
        } else {
            data = le32_to_cpu(data);
        }
    } else {
        /* Page tables are in MMIO. */
        MemTxAttrs attrs = {
            .space = ptw->out_space,
            .secure = arm_space_is_secure(ptw->out_space),
        };
        AddressSpace *as = arm_addressspace(cs, attrs);
        MemTxResult result = MEMTX_OK;

        if (ptw->out_be) {
            data = address_space_ldl_be(as, ptw->out_phys, attrs, &result);
        } else {
            data = address_space_ldl_le(as, ptw->out_phys, attrs, &result);
        }
        if (unlikely(result != MEMTX_OK)) {
            fi->type = ARMFault_SyncExternalOnWalk;
            fi->ea = arm_extabort_type(result);
            return 0;
        }
    }
    return data;
}
static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw,
                            ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    void *host = ptw->out_host;
    uint64_t data;

    if (likely(host)) {
        /* Page tables are in RAM, and we have the host address. */
#ifdef CONFIG_ATOMIC64
        data = qatomic_read__nocheck((uint64_t *)host);
        if (ptw->out_be) {
            data = be64_to_cpu(data);
        } else {
            data = le64_to_cpu(data);
        }
#else
        if (ptw->out_be) {
            data = ldq_be_p(host);
        } else {
            data = ldq_le_p(host);
        }
#endif
    } else {
        /* Page tables are in MMIO. */
        MemTxAttrs attrs = {
            .space = ptw->out_space,
            .secure = arm_space_is_secure(ptw->out_space),
        };
        AddressSpace *as = arm_addressspace(cs, attrs);
        MemTxResult result = MEMTX_OK;

        if (ptw->out_be) {
            data = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
        } else {
            data = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
        }
        if (unlikely(result != MEMTX_OK)) {
            fi->type = ARMFault_SyncExternalOnWalk;
            fi->ea = arm_extabort_type(result);
            return 0;
        }
    }
    return data;
}
static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
                             uint64_t new_val, S1Translate *ptw,
                             ARMMMUFaultInfo *fi)
{
#if defined(TARGET_AARCH64) && defined(CONFIG_TCG)
    uint64_t cur_val;
    void *host = ptw->out_host;

    if (unlikely(!host)) {
        /* Page table in MMIO Memory Region */
        CPUState *cs = env_cpu(env);
        MemTxAttrs attrs = {
            .space = ptw->out_space,
            .secure = arm_space_is_secure(ptw->out_space),
        };
        AddressSpace *as = arm_addressspace(cs, attrs);
        MemTxResult result = MEMTX_OK;
        bool need_lock = !bql_locked();

        if (need_lock) {
            bql_lock();
        }
        if (ptw->out_be) {
            cur_val = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
            if (unlikely(result != MEMTX_OK)) {
                fi->type = ARMFault_SyncExternalOnWalk;
                fi->ea = arm_extabort_type(result);
                if (need_lock) {
                    bql_unlock();
                }
                return old_val;
            }
            if (cur_val == old_val) {
                address_space_stq_be(as, ptw->out_phys, new_val, attrs, &result);
                if (unlikely(result != MEMTX_OK)) {
                    fi->type = ARMFault_SyncExternalOnWalk;
                    fi->ea = arm_extabort_type(result);
                    if (need_lock) {
                        bql_unlock();
                    }
                    return old_val;
                }
            }
        } else {
            cur_val = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
            if (unlikely(result != MEMTX_OK)) {
                fi->type = ARMFault_SyncExternalOnWalk;
                fi->ea = arm_extabort_type(result);
                if (need_lock) {
                    bql_unlock();
                }
                return old_val;
            }
            if (cur_val == old_val) {
                address_space_stq_le(as, ptw->out_phys, new_val, attrs, &result);
                if (unlikely(result != MEMTX_OK)) {
                    fi->type = ARMFault_SyncExternalOnWalk;
                    fi->ea = arm_extabort_type(result);
                    if (need_lock) {
                        bql_unlock();
                    }
                    return old_val;
                }
            }
        }
        if (need_lock) {
            bql_unlock();
        }
        return cur_val;
    }

    /*
     * Raising a stage2 Protection fault for an atomic update to a read-only
     * page is delayed until it is certain that there is a change to make.
     */
    if (unlikely(!ptw->out_rw)) {
        int flags;

        flags = probe_access_full_mmu(env, ptw->out_virt, 0,
                                      MMU_DATA_STORE,
                                      arm_to_core_mmu_idx(ptw->in_ptw_idx),
                                      NULL, NULL);

        if (unlikely(flags & TLB_INVALID_MASK)) {
            /*
             * We know this must be a stage 2 fault because the granule
             * protection table does not separately track read and write
             * permission, so all GPC faults are caught in S1_ptw_translate():
             * we only get here for "readable but not writeable".
             */
            assert(fi->type != ARMFault_None);
            fi->s2addr = ptw->out_virt;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = fault_s1ns(ptw->in_space, ptw->in_ptw_idx);
            return 0;
        }

        /* In case CAS mismatches and we loop, remember writability. */
        ptw->out_rw = true;
    }

#ifdef CONFIG_ATOMIC64
    if (ptw->out_be) {
        old_val = cpu_to_be64(old_val);
        new_val = cpu_to_be64(new_val);
        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
        cur_val = be64_to_cpu(cur_val);
    } else {
        old_val = cpu_to_le64(old_val);
        new_val = cpu_to_le64(new_val);
        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
        cur_val = le64_to_cpu(cur_val);
    }
#else
    /*
     * We can't support the full 64-bit atomic cmpxchg on the host.
     * Because this is only used for FEAT_HAFDBS, which is only for AA64,
     * we know that TCG_OVERSIZED_GUEST is set, which means that we are
     * running in round-robin mode and could only race with dma i/o.
     */
#if !TCG_OVERSIZED_GUEST
# error "Unexpected configuration"
#endif
    bool locked = bql_locked();
    if (!locked) {
        bql_lock();
    }
    if (ptw->out_be) {
        cur_val = ldq_be_p(host);
        if (cur_val == old_val) {
            stq_be_p(host, new_val);
        }
    } else {
        cur_val = ldq_le_p(host);
        if (cur_val == old_val) {
            stq_le_p(host, new_val);
        }
    }
    if (!locked) {
        bql_unlock();
    }
#endif

    return cur_val;
#else
    /* AArch32 does not have FEAT_HADFS; non-TCG guests only use debug-mode. */
    g_assert_not_reached();
#endif
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int maskshift = extract32(tcr, 0, 3);
    uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    uint32_t base_mask;

    if (address & mask) {
        if (tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        base_mask = ~((uint32_t)0x3fffu >> maskshift);
        *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
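/*
 * Example: with TTBCR.N == 2 the mask above is 0xc0000000, so virtual
 * addresses at or above 0x40000000 take the TTBR1 branch, while lower
 * addresses use TTBR0 with its base alignment tightened from 16KB to
 * 16KB >> N (here 4KB).
 */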
/*
 * Translate section/page access permissions to page R/W protection flags
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 * @is_user:     TRUE if accessing from PL0
 */
static int ap_to_rw_prot_is_user(CPUARMState *env, ARMMMUIdx mmu_idx,
                                 int ap, int domain_prot, bool is_user)
{
    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved. */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
/*
 * Translate section/page access permissions to page R/W protection flags
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                         int ap, int domain_prot)
{
    return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot,
                                 regime_is_user(env, mmu_idx));
}
/*
 * Translate section/page access permissions to page R/W protection flags.
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
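/*
 * The simplified AP[2:1] model has only four encodings:
 * 0b00 privileged read/write, 0b01 read/write at any privilege,
 * 0b10 privileged read-only, 0b11 read-only at any privilege,
 * which is exactly the mapping implemented above.
 */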
static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
                             uint32_t address, MMUAccessType access_type,
                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (!S1_ptw_translate(env, ptw, table, fi)) {
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, ptw, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, ptw->in_mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault. */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1MB section. */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        result->f.lg_page_size = 20; /* 1MB */
    } else {
        /* Lookup l2 entry. */
        if (type == 1) {
            /* Coarse pagetable. */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable. */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        if (!S1_ptw_translate(env, ptw, table, fi)) {
            goto do_fault;
        }
        desc = arm_ldl_ptw(env, ptw, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            result->f.lg_page_size = 16;
            break;
        case 2: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            result->f.lg_page_size = 12;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    result->f.lg_page_size = 12;
                } else {
                    /*
                     * UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                result->f.lg_page_size = 10;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            g_assert_not_reached();
        }
    }
    result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot);
    result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
    if (!(result->f.prot & (1 << access_type))) {
        /* Access permission fault. */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    result->f.phys_addr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
                             uint32_t address, MMUAccessType access_type,
                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;
    int user_prot;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (!S1_ptw_translate(env, ptw, table, fi)) {
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, ptw, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section. */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection. */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            result->f.lg_page_size = 24; /* 16MB */
        } else {
            /* Section. */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            result->f.lg_page_size = 20; /* 1MB */
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry. */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        if (!S1_ptw_translate(env, ptw, table, fi)) {
            goto do_fault;
        }
        desc = arm_ldl_ptw(env, ptw, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            result->f.lg_page_size = 16;
            break;
        case 2: case 3: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            result->f.lg_page_size = 12;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            g_assert_not_reached();
        }
    }
    if (domain_prot == 3) {
        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
            (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit. */
            if ((ap & 1) == 0) {
                /* Access flag fault. */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
            user_prot = simple_ap_to_rw_prot_is_user(ap >> 1, 1);
        } else {
            result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
            user_prot = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1);
        }
        if (result->f.prot && !xn) {
            result->f.prot |= PAGE_EXEC;
        }
        if (!(result->f.prot & (1 << access_type))) {
            /* Access permission fault. */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
        if (regime_is_pan(env, mmu_idx) &&
            !regime_is_user(env, mmu_idx) &&
            user_prot &&
            access_type != MMU_INST_FETCH) {
            /* Privileged Access Never fault */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        result->f.attrs.secure = false;
        result->f.attrs.space = ARMSS_NonSecure;
    }
    result->f.phys_addr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/*
 * Translate S2 section/page access permissions to protection flags
 * @s2ap:      The 2-bit stage2 access permissions (S2AP)
 * @xn:        XN (execute-never) bits
 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 */
static int get_S2prot_noexecute(int s2ap)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    return prot;
}

static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
    int prot = get_S2prot_noexecute(s2ap);

    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
        switch (xn) {
        case 0:
            prot |= PAGE_EXEC;
            break;
        case 1:
            if (s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        case 2:
            break;
        case 3:
            if (!s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        if (!extract32(xn, 1, 1)) {
            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
                prot |= PAGE_EXEC;
            }
        }
    }
    return prot;
}
/*
 * Translate section/page access permissions to protection flags
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 * @in_pa:   The original input pa space
 * @out_pa:  The output pa space, modified by NSTable, NS, and NSE
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int xn, int pxn,
                      ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
{
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(!regime_is_stage2(mmu_idx));

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        /*
         * PAN controls can forbid data accesses but don't affect insn fetch.
         * Plain PAN forbids data accesses if EL0 has data permissions;
         * PAN3 forbids data accesses if EL0 has either data or exec perms.
         * Note that for AArch64 the 'user can exec' case is exactly !xn.
         * We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0
         * do not affect EPAN.
         */
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            prot_rw = 0;
        } else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 &&
                   regime_is_pan(env, mmu_idx) &&
                   (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) {
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    if (in_pa != out_pa) {
        switch (in_pa) {
        case ARMSS_Root:
            /*
             * R_ZWRVD: permission fault for insn fetched from non-Root,
             * I_WWBFB: SIF has no effect in EL3.
             */
            return prot_rw;
        case ARMSS_Realm:
            /*
             * R_PKTDS: permission fault for insn fetched from non-Realm,
             * for Realm EL2 or EL2&0. The corresponding fault for EL1&0
             * happens during any stage2 translation.
             */
            switch (mmu_idx) {
            case ARMMMUIdx_E20_0:
            case ARMMMUIdx_E20_2:
            case ARMMMUIdx_E20_2_PAN:
                return prot_rw;
            default:
                break;
            }
            break;
        case ARMSS_Secure:
            if (env->cp15.scr_el3 & SCR_SIF) {
                return prot_rw;
            }
            break;
        default:
            /* Input NonSecure must have output NonSecure. */
            g_assert_not_reached();
        }
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        default:
            break;
        }
    } else {
        xn = xn || !(prot_rw & PAGE_READ);
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
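/*
 * Example: with SCTLR.WXN set, any page writable at the current privilege
 * level loses PAGE_EXEC in the final test above, so a regime cannot map
 * memory that is simultaneously writable and executable.
 */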
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later. */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well. */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
/*
 * check_s2_mmu_setup
 * @is_aa64: True if the translation regime is in AArch64 state
 * @tcr:     VTCR_EL2 or VSTCR_EL2
 * @ds:      Effective value of TCR.DS.
 * @iasize:  Bitsize of IPAs
 * @stride:  Page-table stride (See the ARM ARM)
 *
 * Decode the starting level of the S2 lookup, returning INT_MIN if
 * the configuration is invalid.
 */
static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr,
                              bool ds, int iasize, int stride)
{
    int sl0, sl2, startlevel, granulebits, levels;
    int s1_min_iasize, s1_max_iasize;

    sl0 = extract32(tcr, 6, 2);
    if (is_aa64) {
        /*
         * AArch64.S2InvalidSL: Interpretation of SL depends on the page size,
         * so interleave AArch64.S2StartLevel.
         */
        switch (stride) {
        case 9: /* 4KB */
            /* SL2 is RES0 unless DS=1 & 4KB granule. */
            sl2 = extract64(tcr, 33, 1);
            if (ds && sl2) {
                if (sl0 != 0) {
                    goto fail;
                }
                startlevel = -1;
            } else {
                startlevel = 2 - sl0;
                switch (sl0) {
                case 2:
                    if (arm_pamax(cpu) < 44) {
                        goto fail;
                    }
                    break;
                case 3:
                    if (!cpu_isar_feature(aa64_st, cpu)) {
                        goto fail;
                    }
                    startlevel = 3;
                    break;
                }
            }
            break;
        case 11: /* 16KB */
            switch (sl0) {
            case 2:
                if (arm_pamax(cpu) < 42) {
                    goto fail;
                }
                break;
            case 3:
                if (!ds) {
                    goto fail;
                }
                break;
            }
            startlevel = 3 - sl0;
            break;
        case 13: /* 64KB */
            switch (sl0) {
            case 2:
                if (arm_pamax(cpu) < 44) {
                    goto fail;
                }
                break;
            case 3:
                goto fail;
            }
            startlevel = 3 - sl0;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /*
         * Things are simpler for AArch32 EL2, with only 4k pages.
         * There is no separate S2InvalidSL function, but AArch32.S2Walk
         * begins with walkparms.sl0 in {'1x'}.
         */
        assert(stride == 9);
        if (sl0 >= 2) {
            goto fail;
        }
        startlevel = 2 - sl0;
    }

    /* AArch{64,32}.S2InconsistentSL are functionally equivalent. */
    levels = 3 - startlevel;
    granulebits = stride + 3;

    s1_min_iasize = levels * stride + granulebits + 1;
    s1_max_iasize = s1_min_iasize + (stride - 1) + 4;

    if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) {
        return startlevel;
    }

 fail:
    return INT_MIN;
}
static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds,
                                  ARMGranuleSize gran, int level)
{
    /*
     * See pseudocode AArch64.BlockDescSupported(): block descriptors
     * are not valid at all levels, depending on the page size.
     */
    switch (gran) {
    case Gran4K:
        return (level == 0 && ds) || level == 1 || level == 2;
    case Gran16K:
        return (level == 1 && ds) || level == 2;
    case Gran64K:
        return (level == 1 && arm_pamax(cpu) == 52) || level == 2;
    default:
        g_assert_not_reached();
    }
}
static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw)
{
    uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
    return (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1);
}
/*
 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr,
 * attrs, prot and page_size may not be filled in, and the populated fsr
 * value provides information on why the translation aborted, in the format
 * of a long-format DFSR/IFSR fault register, with the following caveat:
 * the WnR bit is never set (the caller must do this).
 *
 * @ptw: Current and next stage parameters for the walk.
 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @result: set on translation success,
 * @fi: set to fault info if the translation fails
 */
static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
                               uint64_t address,
                               MMUAccessType access_type,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    int32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint64_t attrs;
    int32_t stride;
    int addrsize, inputsize, outputsize;
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int ap, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    uint64_t descriptor, new_descriptor;
    ARMSecuritySpace out_space;
    bool device;

    /* TODO: This code does not support shareability levels. */
    if (aarch64) {
        int ps;

        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH,
                                   !arm_el_is_aa64(env, 1));
        level = 0;

        /*
         * If TxSZ is programmed to a value larger than the maximum,
         * or smaller than the effective minimum, it is IMPLEMENTATION
         * DEFINED whether we behave as if the field were programmed
         * within bounds, or if a level 0 Translation fault is generated.
         *
         * With FEAT_LVA, fault on less than minimum becomes required,
         * so our choice is to always raise the fault.
         */
        if (param.tsz_oob) {
            goto do_translation_fault;
        }

        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;

        /*
         * Bound PS by PARANGE to find the effective output address size.
         * ID_AA64MMFR0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
        ps = MIN(ps, param.ps);
        assert(ps < ARRAY_SIZE(pamax_map));
        outputsize = pamax_map[ps];

        /*
         * With LPA2, the effective output address (OA) size is at most 48 bits
         * unless TCR.DS == 1
         */
        if (!param.ds && param.gran != Gran64K) {
            outputsize = MIN(outputsize, 48);
        }
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
        inputsize = addrsize - param.tsz;
        outputsize = 40;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select) {
            /* The gap between the two regions is a Translation fault */
            goto do_translation_fault;
        }
    }

    stride = arm_granule_bits(param.gran) - 3;

    /*
     * Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /*
     * Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /*
         * Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_translation_fault;
    }

    if (!regime_is_stage2(mmu_idx)) {
        /*
         * The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
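        /*
         * Example: a 48-bit input range with a 4KB granule (stride == 9)
         * gives level = 4 - (48 - 4) / 9 = 0, while the common 39-bit
         * configuration gives level = 4 - (39 - 4) / 9 = 1, matching the
         * architectural start levels for those region sizes.
         */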
    } else {
        int startlevel = check_s2_mmu_setup(cpu, aarch64, tcr, param.ds,
                                            inputsize, stride);
        if (startlevel == INT_MIN) {
            level = 0;
            goto do_translation_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
    indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);

    /*
     * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
     *
     * Otherwise, if the base address is out of range, raise AddressSizeFault.
     * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
     * but we've just cleared the bits above 47, so simplify the test.
     */
    if (outputsize > 48) {
        descaddr |= extract64(ttbr, 2, 4) << 48;
    } else if (descaddr >> outputsize) {
        level = 0;
        fi->type = ARMFault_AddressSize;
        goto do_fault;
    }

    /*
     * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
     * and also to mask out CnP (bit 0) which could validly be non-zero.
     */
    descaddr &= ~indexmask;

    /*
     * For AArch32, the address field in the descriptor goes up to bit 39
     * for both v7 and v8. However, for v8 the SBZ bits [47:40] must be 0
     * or an AddressSize fault is raised. So for v8 we extract those SBZ
     * bits as part of the address, which will be checked via outputsize.
     * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
     * the highest bits of a 52-bit output are placed elsewhere.
     */
    if (param.ds) {
        descaddrmask = MAKE_64BIT_MASK(0, 50);
    } else if (arm_feature(env, ARM_FEATURE_V8)) {
        descaddrmask = MAKE_64BIT_MASK(0, 48);
    } else {
        descaddrmask = MAKE_64BIT_MASK(0, 40);
    }
    descaddrmask &= ~indexmask_grainsize;
    tableattrs = 0;

 next_level:
    descaddr |= (address >> (stride * (4 - level))) & indexmask;
    descaddr &= ~7ULL;

    /*
     * Process the NSTable bit from the previous level. This changes
     * the table address space and the output space from Secure to
     * NonSecure. With RME, the EL3 translation regime does not change
     * from Root to NonSecure.
     */
    if (ptw->in_space == ARMSS_Secure
        && !regime_is_stage2(mmu_idx)
        && extract32(tableattrs, 4, 1)) {
        /*
         * Stage2_S -> Stage2 or Phys_S -> Phys_NS
         * Assert the relative order of the secure/non-secure indexes.
         */
        QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS);
        QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2);
        ptw->in_ptw_idx += 1;
        ptw->in_space = ARMSS_NonSecure;
    }

    if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
        goto do_fault;
    }
    descriptor = arm_ldq_ptw(env, ptw, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    new_descriptor = descriptor;

 restart_atomic_update:
    if (!(descriptor & 1) ||
        (!(descriptor & 2) &&
         !lpae_block_desc_valid(cpu, param.ds, param.gran, level))) {
        /* Invalid, or a block descriptor at an invalid level */
        goto do_translation_fault;
    }

    descaddr = descriptor & descaddrmask;

    /*
     * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
     * of descriptor. For FEAT_LPA2 and effective DS, bits [51:50] of
     * descaddr are in [9:8]. Otherwise, if descaddr is out of range,
     * raise AddressSizeFault.
     */
    if (outputsize > 48) {
        if (param.ds) {
            descaddr |= extract64(descriptor, 8, 2) << 50;
        } else {
            descaddr |= extract64(descriptor, 12, 4) << 48;
        }
    } else if (descaddr >> outputsize) {
        fi->type = ARMFault_AddressSize;
        goto do_fault;
    }

    if ((descriptor & 2) && (level < 3)) {
        /*
         * Table entry. The top five bits are attributes which may
         * propagate down through lower levels of the table (and
         * which are all arranged so that 0 means "no effect", so
         * we can gather them up by ORing in the bits at each level).
         */
        tableattrs |= extract64(descriptor, 59, 5);
        level++;
        indexmask = indexmask_grainsize;
        goto next_level;
    }

    /*
     * Block entry at level 1 or 2, or page entry at level 3.
     * These are basically the same thing, although the number
     * of bits we pull in from the vaddr varies. Note that although
     * descaddrmask masks enough of the low bits of the descriptor
     * to give a correct page or table address, the address field
     * in a block descriptor is smaller; so we need to explicitly
     * clear the lower bits here before ORing in the low vaddr bits.
     *
     * Afterward, descaddr is the final physical address.
     */
    page_size = (1ULL << ((stride * (4 - level)) + 3));
    descaddr &= ~(hwaddr)(page_size - 1);
    descaddr |= (address & (page_size - 1));

    if (likely(!ptw->in_debug)) {
        /*
         * Access flag.
         * If HA is enabled, prepare to update the descriptor below.
         * Otherwise, pass the access fault on to software.
         */
        if (!(descriptor & (1 << 10))) {
            if (param.ha) {
                new_descriptor |= 1 << 10; /* AF */
            } else {
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
        }

        /*
         * Dirty Bit.
         * If HD is enabled, pre-emptively set/clear the appropriate AP/S2AP
         * bit for writeback. The actual write protection test may still be
         * overridden by tableattrs, to be merged below.
         */
        if (param.hd
            && extract64(descriptor, 51, 1)  /* DBM */
            && access_type == MMU_DATA_STORE) {
            if (regime_is_stage2(mmu_idx)) {
                new_descriptor |= 1ull << 7;    /* set S2AP[1] */
            } else {
                new_descriptor &= ~(1ull << 7); /* clear AP[2] */
            }
        }
    }

    /*
     * Extract attributes from the (modified) descriptor, and apply
     * table descriptors. Stage 2 table descriptors do not include
     * any attribute fields. HPD disables all the table attributes
     * except NSTable (which we have already handled).
     */
    attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
    if (!regime_is_stage2(mmu_idx)) {
        if (!param.hpd) {
            attrs |= extract64(tableattrs, 0, 2) << 53;     /* XN, PXN */
            /*
             * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
             * means "force PL1 access only", which means forcing AP[1] to 0.
             */
            attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */
            attrs |= extract32(tableattrs, 3, 1) << 7;    /* APT[1] => AP[2] */
        }
    }

    ap = extract32(attrs, 6, 2);
    out_space = ptw->in_space;
    if (regime_is_stage2(mmu_idx)) {
        /*
         * R_GYNXY: For stage2 in Realm security state, bit 55 is NS.
         * The bit remains ignored for other security states.
         * R_YMCSL: Executing an insn fetched from non-Realm causes
         * a stage2 permission fault.
         */
        if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) {
            out_space = ARMSS_NonSecure;
            result->f.prot = get_S2prot_noexecute(ap);
        } else {
            xn = extract64(attrs, 53, 2);
            result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
        }
    } else {
        int nse, ns = extract32(attrs, 5, 1);
        switch (out_space) {
        case ARMSS_Root:
            /*
             * R_GVZML: Bit 11 becomes the NSE field in the EL3 regime.
             * R_XTYPW: NSE and NS together select the output pa space.
             */
            nse = extract32(attrs, 11, 1);
            out_space = (nse << 1) | ns;
            if (out_space == ARMSS_Secure &&
                !cpu_isar_feature(aa64_sel2, cpu)) {
                out_space = ARMSS_NonSecure;
            }
            break;
        case ARMSS_Secure:
            if (ns) {
                out_space = ARMSS_NonSecure;
            }
            break;
        case ARMSS_Realm:
            switch (mmu_idx) {
            case ARMMMUIdx_Stage1_E0:
            case ARMMMUIdx_Stage1_E1:
            case ARMMMUIdx_Stage1_E1_PAN:
                /* I_CZPRF: For Realm EL1&0 stage1, NS bit is RES0. */
                break;
            case ARMMMUIdx_E20_0:
            case ARMMMUIdx_E20_2:
            case ARMMMUIdx_E20_2_PAN:
                /*
                 * R_LYKFZ, R_WGRZN: For Realm EL2 and EL2&1,
                 * NS changes the output to non-secure space.
                 */
                if (ns) {
                    out_space = ARMSS_NonSecure;
                }
                break;
            default:
                g_assert_not_reached();
            }
            break;
        case ARMSS_NonSecure:
            /* R_QRMFF: For NonSecure state, the NS bit is RES0. */
            break;
        default:
            g_assert_not_reached();
        }
        xn = extract64(attrs, 54, 1);
        pxn = extract64(attrs, 53, 1);

        if (el == 1 && nv_nv1_enabled(env, ptw)) {
            /*
             * With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1}, the block/page
             * descriptor bit 54 holds PXN, 53 is RES0, and the effective value
             * of UXN is 0. Similarly for bits 59 and 60 in table descriptors
             * (which we have already folded into bits 53 and 54 of attrs).
             * AP[1] (descriptor bit 6, our ap bit 0) is treated as 0.
             * Similarly, APTable[0] from the table descriptor is treated as 0;
             * we already folded this into AP[1] and squashing that to 0 does
             * the right thing.
             */
            pxn = xn;
            xn = 0;
            ap &= ~1;
        }

        /*
         * Note that we modified ptw->in_space earlier for NSTable, but
         * result->f.attrs retains a copy of the original security space.
         */
        result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn,
                                    result->f.attrs.space, out_space);
    }

    if (!(result->f.prot & (1 << access_type))) {
        fi->type = ARMFault_Permission;
        goto do_fault;
    }

    /* If FEAT_HAFDBS has made changes, update the PTE. */
    if (new_descriptor != descriptor) {
        new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        /*
         * I_YZSVV says that if the in-memory descriptor has changed,
         * then we must use the information in that new value
         * (which might include a different output address, different
         * attributes, or generate a fault).
         * Restart the handling of the descriptor value from scratch.
         */
        if (new_descriptor != descriptor) {
            descriptor = new_descriptor;
            goto restart_atomic_update;
        }
    }

    result->f.attrs.space = out_space;
    result->f.attrs.secure = arm_space_is_secure(out_space);

    if (regime_is_stage2(mmu_idx)) {
        result->cacheattrs.is_s2_format = true;
        result->cacheattrs.attrs = extract32(attrs, 2, 4);
        /*
         * Security state does not really affect HCR_EL2.FWB;
         * we only need to filter FWB for aa32 or other FEAT.
         */
        device = S2_attrs_are_device(arm_hcr_el2_eff(env),
                                     result->cacheattrs.attrs);
    } else {
        /* Index into MAIR registers for cache attributes */
        uint8_t attrindx = extract32(attrs, 2, 3);
        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
        assert(attrindx <= 7);
        result->cacheattrs.is_s2_format = false;
        result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);

        /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
        if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
            result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
        }
        device = S1_attrs_are_device(result->cacheattrs.attrs);
    }

    /*
     * Enable alignment checks on Device memory.
     *
     * Per R_XCHFJ, this check is mis-ordered. The correct ordering
     * for alignment, permission, and stage 2 faults should be:
     *    - Alignment fault caused by the memory type
     *    - Permission fault
     *    - A stage 2 fault on the memory access
     * but due to the way the TCG softmmu TLB operates, we will have
     * implicitly done the permission check and the stage2 lookup in
     * finding the TLB entry, so the alignment check cannot be done sooner.
     *
     * In v7, for a CPU without the Virtualization Extensions this
     * access is UNPREDICTABLE; we choose to make it take the alignment
     * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
     * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
     */
    if (device) {
        result->f.tlb_fill_flags |= TLB_CHECK_ALIGNED;
    }

    /*
     * For FEAT_LPA2 and effective DS, the SH field in the attributes
     * was re-purposed for output address bits. The SH attribute in
     * that case comes from TCR_ELx, which we extracted earlier.
     */
    if (param.ds) {
        result->cacheattrs.shareability = param.sh;
    } else {
        result->cacheattrs.shareability = extract32(attrs, 8, 2);
    }

    result->f.phys_addr = descaddr;
    result->f.lg_page_size = ctz64(page_size);
    return false;

 do_translation_fault:
    fi->type = ARMFault_Translation;
 do_fault:
    if (fi->s1ptw) {
        /* Retain the existing stage 2 fi->level */
        assert(fi->stage2);
    } else {
        fi->level = level;
        fi->stage2 = regime_is_stage2(mmu_idx);
    }
    fi->s1ns = fault_s1ns(ptw->in_space, mmu_idx);
    return true;
}
static bool get_phys_addr_pmsav5(CPUARMState *env,
                                 S1Translate *ptw,
                                 uint32_t address,
                                 MMUAccessType access_type,
                                 GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
        /* MPU disabled. */
        result->f.phys_addr = address;
        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    result->f.phys_addr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            return true;
        }
        result->f.prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        result->f.prot = PAGE_READ;
        if (!is_user) {
            result->f.prot |= PAGE_WRITE;
        }
        break;
    case 3:
        result->f.prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            return true;
        }
        result->f.prot = PAGE_READ;
        break;
    case 6:
        result->f.prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        fi->type = ARMFault_Permission;
        return true;
    }
    result->f.prot |= PAGE_EXEC;
    return false;
}
static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
                                         int32_t address, uint8_t *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}
static bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /*
     * True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                                         bool is_secure, bool is_user)
{
    /*
     * Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    }

    if (mmu_idx == ARMMMUIdx_Stage2) {
        return false;
    }

    return regime_sctlr(env, mmu_idx) & SCTLR_BR;
}
static bool get_phys_addr_pmsav7(CPUARMState *env,
                                 S1Translate *ptw,
                                 uint32_t address,
                                 MMUAccessType access_type,
                                 GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    bool is_user = regime_is_user(env, mmu_idx);
    bool secure = arm_space_is_secure(ptw->in_space);

    result->f.phys_addr = address;
    result->f.lg_page_size = TARGET_PAGE_BITS;
    result->f.prot = 0;

    if (regime_translation_disabled(env, mmu_idx, ptw->in_space) ||
        m_is_ppb_region(env, address)) {
        /*
         * MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    result->f.lg_page_size = 0;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /*
                     * This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                result->f.lg_page_size = rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address,
                                         &result->f.prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    result->f.prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    result->f.prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        result->f.prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    result->f.prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    result->f.prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        result->f.prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                result->f.prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(result->f.prot & (1 << access_type));
}

static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx,
                             uint32_t secure)
{
    if (regime_el(env, mmu_idx) == 2) {
        return env->pmsav8.hprbar;
    } else {
        return env->pmsav8.rbar[secure];
    }
}

static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx,
                             uint32_t secure)
{
    if (regime_el(env, mmu_idx) == 2) {
        return env->pmsav8.hprlar;
    } else {
        return env->pmsav8.rlar[secure];
    }
}

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /*
     * Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * If the region hit doesn't cover the entire TARGET_PAGE the address
     * is within, then we set the result page_size to 1 to force the
     * memory system to use a subpage.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
    int region_counter;

    if (regime_el(env, mmu_idx) == 2) {
        region_counter = cpu->pmsav8r_hdregion;
    } else {
        region_counter = cpu->pmsav7_dregion;
    }

    result->f.lg_page_size = TARGET_PAGE_BITS;
    result->f.phys_addr = address;
    result->f.prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    if (mmu_idx == ARMMMUIdx_Stage2) {
        fi->stage2 = true;
    }

    /*
     * Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx, arm_secure_to_space(secure))) {
        /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
            hit = true;
        }

        uint32_t bitmask;
        if (arm_feature(env, ARM_FEATURE_M)) {
            bitmask = 0x1f;
        } else {
            bitmask = 0x3f;
            fi->level = 0;
        }

        for (n = region_counter - 1; n >= 0; n--) {
            /* region search */
            /*
             * Note that the base address is bits [31:x] from the register
             * with bits [x-1:0] all zeroes, but the limit address is bits
             * [31:x] from the register with bits [x:0] all ones. Where x is
             * 5 for Cortex-M and 6 for Cortex-R
             */
            uint32_t base = regime_rbar(env, mmu_idx, secure)[n] & ~bitmask;
            uint32_t limit = regime_rlar(env, mmu_idx, secure)[n] | bitmask;

            if (!(regime_rlar(env, mmu_idx, secure)[n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    result->f.lg_page_size = 0;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                result->f.lg_page_size = 0;
            }

            if (matchregion != -1) {
                /*
                 * Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                if (arm_feature(env, ARM_FEATURE_M)) {
                    fi->level = 1;
                }
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            fi->type = ARMFault_Background;
        } else {
            fi->type = ARMFault_Permission;
        }
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
    } else {
        uint32_t matched_rbar = regime_rbar(env, mmu_idx, secure)[matchregion];
        uint32_t matched_rlar = regime_rlar(env, mmu_idx, secure)[matchregion];
        uint32_t ap = extract32(matched_rbar, 1, 2);
        uint32_t xn = extract32(matched_rbar, 0, 1);
        bool pxn = false;

        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
            pxn = extract32(matched_rlar, 4, 1);
        }

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        if (regime_el(env, mmu_idx) == 2) {
            result->f.prot = simple_ap_to_rw_prot_is_user(ap,
                                            mmu_idx != ARMMMUIdx_E2);
        } else {
            result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        }

        if (!arm_feature(env, ARM_FEATURE_M)) {
            uint8_t attrindx = extract32(matched_rlar, 1, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            uint8_t sh = extract32(matched_rlar, 3, 2);

            if (regime_sctlr(env, mmu_idx) & SCTLR_WXN &&
                result->f.prot & PAGE_WRITE && mmu_idx != ARMMMUIdx_Stage2) {
                xn = 0x1;
            }

            if ((regime_el(env, mmu_idx) == 1) &&
                regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) {
                pxn = 0x1;
            }

            result->cacheattrs.is_s2_format = false;
            result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
            result->cacheattrs.shareability = sh;
        }

        if (result->f.prot && !xn && !(pxn && !is_user)) {
            result->f.prot |= PAGE_EXEC;
        }

        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    if (arm_feature(env, ARM_FEATURE_M)) {
        fi->level = 1;
    }
    return !(result->f.prot & (1 << access_type));
}

static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /*
     * The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool is_secure, V8M_SAttributes *sattrs)
{
    /*
     * Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !is_secure;
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /*
                         * If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}

static bool get_phys_addr_pmsav8(CPUARMState *env,
                                 S1Translate *ptw,
                                 uint32_t address,
                                 MMUAccessType access_type,
                                 GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    V8M_SAttributes sattrs = {};
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    bool secure = arm_space_is_secure(ptw->in_space);
    bool ret;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx,
                            secure, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /*
             * Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
                result->f.phys_addr = address;
                result->f.prot = 0;
                return true;
            }
        } else {
            /*
             * For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                result->f.attrs.secure = false;
                result->f.attrs.space = ARMSS_NonSecure;
            } else if (!secure) {
                /*
                 * NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
                result->f.phys_addr = address;
                result->f.prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
                            result, fi, NULL);
    if (sattrs.subpage) {
        result->f.lg_page_size = 0;
    }
    return ret;
}

/*
 * Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if (hcr & HCR_CD) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}

/*
 * Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}

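/*
 * For example, combining a stage 1 nibble of 0xf (Write-Back,
 * RW-allocate) with a stage 2 nibble of 0xa (Write-Through) yields 0xb:
 * the stage 2 Write-Through type wins, but the allocation hint bits
 * still come from stage 1.
 */
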
/*
 * Combine the memory type and cacheability attributes of
 * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
 * combined attributes in MAIR_EL1 format.
 */
static uint8_t combined_attrs_nofwb(uint64_t hcr,
                                    ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;

    if (s2.is_s2_format) {
        s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
    } else {
        s2_mair_attrs = s2.attrs;
    }

    s1lo = extract32(s1.attrs, 0, 4);
    s2lo = extract32(s2_mair_attrs, 0, 4);
    s1hi = extract32(s1.attrs, 4, 4);
    s2hi = extract32(s2_mair_attrs, 4, 4);

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret_attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret_attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret_attrs = 8;  /* nGRE */
        } else {
            ret_attrs = 0xc; /* GRE */
        }
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);
    }
    return ret_attrs;
}

static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
{
    /*
     * Given the 4 bits specifying the outer or inner cacheability
     * in MAIR format, return a value specifying Normal Write-Back,
     * with the allocation and transient hints taken from the input
     * if the input specified some kind of cacheable attribute.
     */
    if (attr == 0 || attr == 4) {
        /*
         * 0 == an UNPREDICTABLE encoding
         * 4 == Non-cacheable
         * Either way, force Write-Back RW allocate non-transient
         */
        return 0xf;
    }
    /* Change WriteThrough to WriteBack, keep allocation and transient hints */
    return attr | 4;
}

/*
 * Combine the memory type and cacheability attributes of
 * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
 * combined attributes in MAIR_EL1 format.
 */
static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    assert(s2.is_s2_format && !s1.is_s2_format);

    switch (s2.attrs) {
    case 7:
        /* Use stage 1 attributes */
        return s1.attrs;
    case 6:
        /*
         * Force Normal Write-Back. Note that if S1 is Normal cacheable
         * then we take the allocation hints from it; otherwise it is
         * RW allocate, non-transient.
         */
        if ((s1.attrs & 0xf0) == 0) {
            /* S1 is Device */
            return 0xff;
        }
        /* Need to check the Inner and Outer nibbles separately */
        return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
               force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
    case 5:
        /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
        if ((s1.attrs & 0xf0) == 0) {
            return s1.attrs;
        }
        return 0x44;
    case 0 ... 3:
        /* Force Device, of subtype specified by S2 */
        return s2.attrs << 2;
    default:
        /*
         * RESERVED values (including RES0 descriptor bit [5] being nonzero);
         * arbitrarily force Device.
         */
        return 0;
    }
}

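/*
 * In short, with FWB the stage 2 attribute field acts as a selector:
 * 7 passes the stage 1 attributes through, 6 forces Normal Write-Back
 * (keeping stage 1 allocation hints where present), 5 forces Normal
 * Non-cacheable unless stage 1 is Device, and the remaining defined
 * values force Device memory of the subtype given by stage 2 (shifted
 * into MAIR position by the << 2 above).
 */
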
/*
 * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
                                        ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    ARMCacheAttrs ret;
    bool tagged = false;

    assert(!s1.is_s2_format);
    ret.is_s2_format = false;

    if (s1.attrs == 0xf0) {
        tagged = true;
        s1.attrs = 0xff;
    }

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (hcr & HCR_FWB) {
        ret.attrs = combined_attrs_fwb(s1, s2);
    } else {
        ret.attrs = combined_attrs_nofwb(hcr, s1, s2);
    }

    /*
     * Any location for which the resultant memory type is any
     * type of Device memory is always treated as Outer Shareable.
     * Any location for which the resultant memory type is Normal
     * Inner Non-cacheable, Outer Non-cacheable is always treated
     * as Outer Shareable.
     * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
     */
    if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
        ret.shareability = 2;
    }

    /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
    if (tagged && ret.attrs == 0xff) {
        ret.attrs = 0xf0;
    }

    return ret;
}

/*
 * MMU disabled.  S1 addresses within aa64 translation regimes are
 * still checked for bounds -- see AArch64.S1DisabledOutput().
 */
static bool get_phys_addr_disabled(CPUARMState *env,
                                   S1Translate *ptw,
                                   target_ulong address,
                                   MMUAccessType access_type,
                                   GetPhysAddrResult *result,
                                   ARMMMUFaultInfo *fi)
{
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    uint8_t memattr = 0x00;    /* Device nGnRnE */
    uint8_t shareability = 0;  /* non-shareable */
    int r_el;

    switch (mmu_idx) {
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_Phys_S:
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_Phys_Root:
    case ARMMMUIdx_Phys_Realm:
        break;

    default:
        r_el = regime_el(env, mmu_idx);
        if (arm_el_is_aa64(env, r_el)) {
            int pamax = arm_pamax(env_archcpu(env));
            uint64_t tcr = env->cp15.tcr_el[r_el];
            int addrtop, tbi;

            tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
            if (access_type == MMU_INST_FETCH) {
                tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
            }
            tbi = (tbi >> extract64(address, 55, 1)) & 1;
            addrtop = (tbi ? 55 : 63);

            if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
                fi->type = ARMFault_AddressSize;
                fi->level = 0;
                fi->stage2 = false;
                return true;
            }

            /*
             * When TBI is disabled, we've just validated that all of the
             * bits above PAMax are zero, so logically we only need to
             * clear the top byte for TBI.  But it's clearer to follow
             * the pseudocode set of addrdesc.paddress.
             */
            address = extract64(address, 0, 52);
        }

        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
        if (r_el == 1) {
            uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
            if (hcr & HCR_DC) {
                if (hcr & HCR_DCT) {
                    memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
                } else {
                    memattr = 0xff;  /* Normal, WB, RWA */
                }
            }
        }
        if (memattr == 0) {
            if (access_type == MMU_INST_FETCH) {
                if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
                    memattr = 0xee;  /* Normal, WT, RA, NT */
                } else {
                    memattr = 0x44;  /* Normal, NC, No */
                }
            }
            shareability = 2; /* outer shareable */
        }
        result->cacheattrs.is_s2_format = false;
        break;
    }

    result->f.phys_addr = address;
    result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    result->f.lg_page_size = TARGET_PAGE_BITS;
    result->cacheattrs.shareability = shareability;
    result->cacheattrs.attrs = memattr;
    return false;
}

static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
                                   target_ulong address,
                                   MMUAccessType access_type,
                                   GetPhysAddrResult *result,
                                   ARMMMUFaultInfo *fi)
{
    hwaddr ipa;
    int s1_prot, s1_lgpgsz;
    ARMSecuritySpace in_space = ptw->in_space;
    bool ret, ipa_secure, s1_guarded;
    ARMCacheAttrs cacheattrs1;
    ARMSecuritySpace ipa_space;
    uint64_t hcr;

    ret = get_phys_addr_nogpc(env, ptw, address, access_type, result, fi);

    /* If S1 fails, return early.  */
    if (ret) {
        return ret;
    }

    ipa = result->f.phys_addr;
    ipa_secure = result->f.attrs.secure;
    ipa_space = result->f.attrs.space;

    ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
    ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
    ptw->in_space = ipa_space;
    ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx);

    /*
     * S1 is done, now do S2 translation.
     * Save the stage1 results so that we may merge prot and cacheattrs later.
     */
    s1_prot = result->f.prot;
    s1_lgpgsz = result->f.lg_page_size;
    s1_guarded = result->f.extra.arm.guarded;
    cacheattrs1 = result->cacheattrs;
    memset(result, 0, sizeof(*result));

    ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, result, fi);
    fi->s2addr = ipa;

    /* Combine the S1 and S2 perms.  */
    result->f.prot &= s1_prot;

    /* If S2 fails, return early.  */
    if (ret) {
        return ret;
    }

    /*
     * If either S1 or S2 returned a result smaller than TARGET_PAGE_SIZE,
     * this means "don't put this in the TLB"; in this case, return a
     * result with lg_page_size == 0 to achieve that. Otherwise,
     * use the maximum of the S1 & S2 page size, so that invalidation
     * of pages > TARGET_PAGE_SIZE works correctly. (This works even though
     * we know the combined result permissions etc only cover the minimum
     * of the S1 and S2 page size, because we know that the common TLB code
     * never actually creates TLB entries bigger than TARGET_PAGE_SIZE,
     * and passing a larger page size value only affects invalidations.)
     */
    if (result->f.lg_page_size < TARGET_PAGE_BITS ||
        s1_lgpgsz < TARGET_PAGE_BITS) {
        result->f.lg_page_size = 0;
    } else if (result->f.lg_page_size < s1_lgpgsz) {
        result->f.lg_page_size = s1_lgpgsz;
    }

    /* Combine the S1 and S2 cache attributes. */
    hcr = arm_hcr_el2_eff_secstate(env, in_space);
    if (hcr & HCR_DC) {
        /*
         * HCR.DC forces the first stage attributes to
         *  Normal Non-Shareable,
         *  Inner Write-Back Read-Allocate Write-Allocate,
         *  Outer Write-Back Read-Allocate Write-Allocate.
         * Do not overwrite Tagged within attrs.
         */
        if (cacheattrs1.attrs != 0xf0) {
            cacheattrs1.attrs = 0xff;
        }
        cacheattrs1.shareability = 0;
    }
    result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
                                            result->cacheattrs);

    /* No BTI GP information in stage 2, we just use the S1 value */
    result->f.extra.arm.guarded = s1_guarded;

    /*
     * Check if IPA translates to secure or non-secure PA space.
     * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
     */
    if (in_space == ARMSS_Secure) {
        result->f.attrs.secure =
            !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
            && (ipa_secure
                || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)));
        result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure);
    }

    return false;
}

static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
                                target_ulong address,
                                MMUAccessType access_type,
                                GetPhysAddrResult *result,
                                ARMMMUFaultInfo *fi)
{
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    ARMMMUIdx s1_mmu_idx;

    /*
     * The page table entries may downgrade Secure to NonSecure, but
     * cannot upgrade a NonSecure translation regime's attributes
     * to Secure or Realm.
     */
    result->f.attrs.space = ptw->in_space;
    result->f.attrs.secure = arm_space_is_secure(ptw->in_space);

    switch (mmu_idx) {
    case ARMMMUIdx_Phys_S:
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_Phys_Root:
    case ARMMMUIdx_Phys_Realm:
        /* Checking Phys early avoids special casing later vs regime_el. */
        return get_phys_addr_disabled(env, ptw, address, access_type,
                                      result, fi);

    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        /*
         * First stage lookup uses second stage for ptw; only
         * Secure has both S and NS IPA and starts with Stage2_S.
         */
        ptw->in_ptw_idx = (ptw->in_space == ARMSS_Secure) ?
            ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
        break;

    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
        /*
         * Second stage lookup uses physical for ptw; whether this is S or
         * NS may depend on the SW/NSW bits if this is a stage 2 lookup for
         * the Secure EL2&0 regime.
         */
        ptw->in_ptw_idx = ptw_idx_for_stage_2(env, mmu_idx);
        break;

    case ARMMMUIdx_E10_0:
        s1_mmu_idx = ARMMMUIdx_Stage1_E0;
        goto do_twostage;
    case ARMMMUIdx_E10_1:
        s1_mmu_idx = ARMMMUIdx_Stage1_E1;
        goto do_twostage;
    case ARMMMUIdx_E10_1_PAN:
        s1_mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
    do_twostage:
        /*
         * Call ourselves recursively to do the stage 1 and then stage 2
         * translations if mmu_idx is a two-stage regime, and EL2 present.
         * Otherwise, a stage1+stage2 translation is just stage 1.
         */
        ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
        if (arm_feature(env, ARM_FEATURE_EL2) &&
            !regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) {
            return get_phys_addr_twostage(env, ptw, address, access_type,
                                          result, fi);
        }
        /* fall through */

    default:
        /* Single stage uses physical for ptw. */
        ptw->in_ptw_idx = arm_space_to_phys(ptw->in_space);
        break;
    }

    result->f.attrs.user = regime_is_user(env, mmu_idx);

    /*
     * Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        result->f.lg_page_size = TARGET_PAGE_BITS;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, ptw, address, access_type,
                                       result, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, ptw, address, access_type,
                                       result, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, ptw, address, access_type,
                                       result, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      result->f.prot & PAGE_READ ? 'r' : '-',
                      result->f.prot & PAGE_WRITE ? 'w' : '-',
                      result->f.prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
        return get_phys_addr_disabled(env, ptw, address, access_type,
                                      result, fi);
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, ptw, address, access_type, result, fi);
    } else if (arm_feature(env, ARM_FEATURE_V7) ||
               regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
    } else {
        return get_phys_addr_v5(env, ptw, address, access_type, result, fi);
    }
}

static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
                              target_ulong address,
                              MMUAccessType access_type,
                              GetPhysAddrResult *result,
                              ARMMMUFaultInfo *fi)
{
    if (get_phys_addr_nogpc(env, ptw, address, access_type, result, fi)) {
        return true;
    }
    if (!granule_protection_check(env, result->f.phys_addr,
                                  result->f.attrs.space, fi)) {
        fi->type = ARMFault_GPCFOnOutput;
        return true;
    }
    return false;
}

bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
                                    MMUAccessType access_type,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
{
    S1Translate ptw = {
        .in_mmu_idx = mmu_idx,
        .in_space = space,
    };
    return get_phys_addr_nogpc(env, &ptw, address, access_type, result, fi);
}

bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    S1Translate ptw = {
        .in_mmu_idx = mmu_idx,
    };
    ARMSecuritySpace ss;

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
        ss = arm_security_space_below_el3(env);
        break;
    case ARMMMUIdx_Stage2:
        /*
         * For Secure EL2, we need this index to be NonSecure;
         * otherwise this will already be NonSecure or Realm.
         */
        ss = arm_security_space_below_el3(env);
        if (ss == ARMSS_Secure) {
            ss = ARMSS_NonSecure;
        }
        break;
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        ss = ARMSS_NonSecure;
        break;
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_Phys_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        ss = ARMSS_Secure;
        break;
    case ARMMMUIdx_E3:
        if (arm_feature(env, ARM_FEATURE_AARCH64) &&
            cpu_isar_feature(aa64_rme, env_archcpu(env))) {
            ss = ARMSS_Root;
        } else {
            ss = ARMSS_Secure;
        }
        break;
    case ARMMMUIdx_Phys_Root:
        ss = ARMSS_Root;
        break;
    case ARMMMUIdx_Phys_Realm:
        ss = ARMSS_Realm;
        break;
    default:
        g_assert_not_reached();
    }

    ptw.in_space = ss;
    return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
}

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    ARMSecuritySpace ss = arm_security_space(env);
    S1Translate ptw = {
        .in_mmu_idx = mmu_idx,
        .in_space = ss,
        .in_debug = true,
    };
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    bool ret;

    ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
    *attrs = res.f.attrs;

    if (ret) {
        return -1;
    }
    return res.f.phys_addr;
}