/*
 * ARM page table walking.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/range.h"
#include "cpu.h"
#include "internals.h"
#include "idau.h"
static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               bool s1_is_el0, hwaddr *phys_ptr,
                               MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));
/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
static const uint8_t pamax_map[] = {
    [0] = 32,
    [1] = 36,
    [2] = 40,
    [3] = 42,
    [4] = 44,
    [5] = 48,
    [6] = 52,
};
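/*
 * For example, a CPU reporting PARANGE == 5 has a 48-bit physical
 * address size, and TCR_ELx.IPS == 5 likewise caps the output size
 * at 48 bits.
 */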
/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
unsigned int arm_pamax(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        unsigned int parange =
            FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

        /*
         * id_aa64mmfr0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        assert(parange < ARRAY_SIZE(pamax_map));
        return pamax_map[parange];
    }

    /*
     * In machvirt_init, we call arm_pamax on a cpu that is not fully
     * initialized, so we can't rely on the propagation done in realize.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) ||
        arm_feature(&cpu->env, ARM_FEATURE_V7VE)) {
        /* v7 with LPAE */
        return 40;
    }

    /* Anything else */
    return 32;
}
/*
 * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
 */
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
        return ARMMMUIdx_Stage1_SE0;
    case ARMMMUIdx_SE10_1:
        return ARMMMUIdx_Stage1_SE1;
    case ARMMMUIdx_SE10_1_PAN:
        return ARMMMUIdx_Stage1_SE1_PAN;
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        return mmu_idx;
    }
}

ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}
static bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}
/* Return the TTBR associated with this translation regime */
static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        return env->cp15.vsttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}
/* Return true if the specified stage of address translation is disabled */
static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /*
             * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    hcr_el2 = arm_hcr_el2_eff(env);

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
static bool ptw_attrs_are_device(CPUARMState *env, ARMCacheAttrs cacheattrs)
{
    /*
     * For an S1 page table walk, the stage 1 attributes are always
     * some form of "this is Normal memory". The combined S1+S2
     * attributes are therefore only Device if stage 2 specifies Device.
     * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
     * ie when cacheattrs.attrs bits [3:2] are 0b00.
     * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
     * when cacheattrs.attrs bit [2] is 0.
     */
    assert(cacheattrs.is_s2_format);
    if (arm_hcr_el2_eff(env) & HCR_FWB) {
        return (cacheattrs.attrs & 0x4) == 0;
    } else {
        return (cacheattrs.attrs & 0xc) == 0;
    }
}
/* Translate a S1 pagetable walk through S2 if needed. */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, bool *is_secure,
                               ARMMMUFaultInfo *fi)
{
    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
                                          : ARMMMUIdx_Stage2;
        ARMCacheAttrs cacheattrs = {};
        MemTxAttrs txattrs = {};

        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
                                 &s2pa, &txattrs, &s2prot, &s2size, fi,
                                 &cacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !*is_secure;
            return ~0;
        }
        if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
            ptw_attrs_are_device(env, cacheattrs)) {
            /*
             * PTW set and S1 walk touched S2 Device memory:
             * generate Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !*is_secure;
            return ~0;
        }

        if (arm_is_secure_below_el3(env)) {
            /* Check if page table walk is to secure or non-secure PA space. */
            if (*is_secure) {
                *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
            } else {
                *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
            }
        } else {
            assert(!*is_secure);
        }

        addr = s2pa;
    }
    return addr;
}
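/*
 * Note that both page-table-walk load helpers below route their
 * descriptor address through S1_ptw_translate() first, so a stage 2
 * fault taken during a stage 1 walk is reported via fi->s1ptw.
 */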
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static uint64_t arm_ldq_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
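/*
 * The short-descriptor (v5/v6) walkers below fetch 32-bit descriptors
 * with arm_ldl_ptw(); the LPAE long-descriptor format uses 64-bit
 * descriptors, so get_phys_addr_lpae() uses arm_ldq_ptw() instead.
 */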
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
/*
 * Translate section/page access permissions to page R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                         int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved. */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
/*
 * Translate section/page access permissions to page R/W protection flags.
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
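/*
 * Summary of the simple AP encoding handled above:
 *   AP[2:1] == 0 -> privileged read/write, user none
 *   AP[2:1] == 1 -> read/write at any privilege
 *   AP[2:1] == 2 -> privileged read-only, user none
 *   AP[2:1] == 3 -> read-only at any privilege
 */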
static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault. */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section. */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry. */
        if (type == 1) {
            /* Coarse pagetable. */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable. */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /*
                     * UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            g_assert_not_reached();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault. */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->level = level;
    fi->domain = domain;
    return true;
}
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section. */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection. */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section. */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry. */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            g_assert_not_reached();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit. */
            if ((ap & 1) == 0) {
                /* Access flag fault. */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault. */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->level = level;
    fi->domain = domain;
    return true;
}
/*
 * Translate S2 section/page access permissions to protection flags
 *
 * @env:       CPUARMState
 * @s2ap:      The 2-bit stage2 access permissions (S2AP)
 * @xn:        XN (execute-never) bits
 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }

    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
        switch (xn) {
        case 0:
            prot |= PAGE_EXEC;
            break;
        case 1:
            if (s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        case 2:
            break;
        case 3:
            if (!s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        if (!extract32(xn, 1, 1)) {
            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
                prot |= PAGE_EXEC;
            }
        }
    }
    return prot;
}
/*
 * Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_Stage2);
    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            /* PAN forbids data accesses but doesn't affect insn fetch */
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;

                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later. */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well. */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
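/*
 * Example for the select computation above: with T1SZ == 0 and
 * T0SZ == 2, TTBR0 covers VAs up to 0x3fffffff and any higher
 * address selects TTBR1 (select == 1).
 */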
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride, int outputsize)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /*
     * Negative levels are usually not allowed...
     * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
     * begins with level -1.  Note that previous feature tests will have
     * eliminated this combination if it is not enabled.
     */
    if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        switch (stride) {
        case 13: /* 64KB Pages. */
            if (level == 0 || (level == 1 && outputsize <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages. */
            if (level == 0 || (level == 1 && outputsize <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages. */
            if (level == 0 && outputsize <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks. */
        if (inputsize > outputsize &&
            (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that. */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
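/*
 * Worked example for the startsizecheck above: a stage 2 walk of a
 * 40-bit IPA with a 4KB granule (stride == 9) starting at level 1
 * gives 40 - ((3 - 1) * 9 + 12) == 10, inside the valid range
 * [1, stride + 4].
 */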
/**
 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr,
 * attrs, prot and page_size may not be filled in, and the populated fsr
 * value provides information on why the translation aborted, in the format
 * of a long-format DFSR/IFSR fault register, with the following caveat:
 * the WnR bit is never set (the caller must do this).
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @mmu_idx: MMU index indicating required translation regime
 * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page
 *             table walk), must be true if this is stage 2 of a stage 1+2
 *             walk for an EL0 access. If @mmu_idx is anything else,
 *             @s1_is_el0 is ignored.
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size_ptr: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               bool s1_is_el0, hwaddr *phys_ptr,
                               MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = env_archcpu(env);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize, outputsize;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    bool guarded = false;

    /* TODO: This code does not support shareability levels. */
    if (aarch64) {
        int ps;

        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;

        /*
         * If TxSZ is programmed to a value larger than the maximum,
         * or smaller than the effective minimum, it is IMPLEMENTATION
         * DEFINED whether we behave as if the field were programmed
         * within bounds, or if a level 0 Translation fault is generated.
         *
         * With FEAT_LVA, fault on less than minimum becomes required,
         * so our choice is to always raise the fault.
         */
        if (param.tsz_oob) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }

        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;

        /*
         * Bound PS by PARANGE to find the effective output address size.
         * ID_AA64MMFR0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
        ps = MIN(ps, param.ps);
        assert(ps < ARRAY_SIZE(pamax_map));
        outputsize = pamax_map[ps];
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
        inputsize = addrsize - param.tsz;
        outputsize = 40;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }

    /*
     * Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /*
     * Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /*
         * Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
        /*
         * The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
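        /*
         * For instance, a 48-bit inputsize with a 4KB granule
         * (stride == 9) gives level = 4 - (48 - 4) / 9 == 0,
         * i.e. a full four-level walk.
         */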
    } else {
        /*
         * For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t sl2 = extract64(tcr->raw_tcr, 33, 1);
        uint32_t startlevel;
        bool ok;

        /* SL2 is RES0 unless DS=1 & 4kb granule. */
        if (param.ds && stride == 9 && sl2) {
            if (sl0 != 0) {
                level = 0;
                fault_type = ARMFault_Translation;
                goto do_fault;
            }
            startlevel = -1;
        } else if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;

            if (cpu_isar_feature(aa64_st, cpu)) {
                startlevel &= 3;
            }
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride, outputsize);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
    indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);

    /*
     * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
     *
     * Otherwise, if the base address is out of range, raise AddressSizeFault.
     * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
     * but we've just cleared the bits above 47, so simplify the test.
     */
    if (outputsize > 48) {
        descaddr |= extract64(ttbr, 2, 4) << 48;
    } else if (descaddr >> outputsize) {
        level = 0;
        fault_type = ARMFault_AddressSize;
        goto do_fault;
    }

    /*
     * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
     * and also to mask out CnP (bit 0) which could validly be non-zero.
     */
    descaddr &= ~indexmask;

    /*
     * For AArch32, the address field in the descriptor goes up to bit 39
     * for both v7 and v8.  However, for v8 the SBZ bits [47:40] must be 0
     * or an AddressSize fault is raised.  So for v8 we extract those SBZ
     * bits as part of the address, which will be checked via outputsize.
     * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
     * the highest bits of a 52-bit output are placed elsewhere.
     */
    if (param.ds) {
        descaddrmask = MAKE_64BIT_MASK(0, 50);
    } else if (arm_feature(env, ARM_FEATURE_V8)) {
        descaddrmask = MAKE_64BIT_MASK(0, 48);
    } else {
        descaddrmask = MAKE_64BIT_MASK(0, 40);
    }
    descaddrmask &= ~indexmask_grainsize;

    /*
     * Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
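        /*
         * Because grainsize == stride + 3, the shift above leaves the
         * descriptor index already scaled by 8 (the descriptor size),
         * and clearing the low 3 bits aligns descaddr to a descriptor.
         */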
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(env, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }

        descaddr = descriptor & descaddrmask;

        /*
         * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
         * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
         * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
         * raise AddressSizeFault.
         */
        if (outputsize > 48) {
            if (param.ds) {
                descaddr |= extract64(descriptor, 8, 2) << 50;
            } else {
                descaddr |= extract64(descriptor, 12, 4) << 48;
            }
        } else if (descaddr >> outputsize) {
            fault_type = ARMFault_AddressSize;
            goto do_fault;
        }

        if ((descriptor & 2) && (level < 3)) {
            /*
             * Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /*
         * Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies. Note that although
         * descaddrmask masks enough of the low bits of the descriptor
         * to give a correct page or table address, the address field
         * in a block descriptor is smaller; so we need to explicitly
         * clear the lower bits here before ORing in the low vaddr bits.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr &= ~(page_size - 1);
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1);  /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable. */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /*
         * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /*
     * Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        ns = mmu_idx == ARMMMUIdx_Stage2;
        xn = extract32(attrs, 11, 2);
        *prot = get_S2prot(env, ap, xn, s1_is_el0);
    } else {
        ns = extract32(attrs, 3, 1);
        xn = extract32(attrs, 12, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /*
         * The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
        arm_tlb_bti_gp(txattrs) = true;
    }

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        cacheattrs->is_s2_format = true;
        cacheattrs->attrs = extract32(attrs, 0, 4);
    } else {
        /* Index into MAIR registers for cache attributes */
        uint8_t attrindx = extract32(attrs, 0, 3);
        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];

        assert(attrindx <= 7);
        cacheattrs->is_s2_format = false;
        cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
    }

    /*
     * For FEAT_LPA2 and effective DS, the SH field in the attributes
     * was re-purposed for output address bits.  The SH attribute in
     * that case comes from TCR_ELx, which we extracted earlier.
     */
    if (param.ds) {
        cacheattrs->shareability = param.sh;
    } else {
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
                               mmu_idx == ARMMMUIdx_Stage2_S);
    fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
    return true;
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
                                         int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /*
     * True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                                         bool is_user)
{
    /*
     * Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /*
         * MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /*
                     * This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /*
     * Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /*
     * Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /*
             * Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /*
                 * Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
        bool pxn = false;

        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
            pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
        }

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn && !(pxn && !is_user)) {
            *prot |= PAGE_EXEC;
        }
        /*
         * We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /*
     * The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
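/*
 * The fixed ranges above cover the PPB debug and system control areas
 * (e.g. the SCS at 0xe000e000 and its non-secure alias at 0xe002e000),
 * which v8M defines as exempt from SAU/IDAU checks.
 */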
void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs)
{
    /*
     * Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /*
                         * If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /*
             * Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /*
             * For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /*
                 * NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}
/*
 * Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
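/*
 * Worked example: s2attrs == 0xf (Normal, Write-Back inner and outer)
 * converts to (3 << 6) | (3 << 4) | (3 << 2) | 3 == 0xff, i.e. Normal
 * Write-Back with RW-allocate hints in MAIR format.
 */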
/*
 * Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry with respect to the pseudocode.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
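/*
 * E.g. combining S1 == 0xf (Write-Back, RW allocate) with S2 == 0xa
 * (Write-Through) takes the third branch above and yields 0xb:
 * Write-Through, keeping the stage 1 allocation hints.
 */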
/*
 * Combine the memory type and cacheability attributes of
 * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
 * combined attributes in MAIR_EL1 format.
 */
static uint8_t combined_attrs_nofwb(CPUARMState *env,
                                    ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;

    s2_mair_attrs = convert_stage2_attrs(env, s2.attrs);

    s1lo = extract32(s1.attrs, 0, 4);
    s2lo = extract32(s2_mair_attrs, 0, 4);
    s1hi = extract32(s1.attrs, 4, 4);
    s2hi = extract32(s2_mair_attrs, 4, 4);

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret_attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret_attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret_attrs = 8;  /* nGRE */
        } else {
            ret_attrs = 0xc; /* GRE */
        }
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);
    }
    return ret_attrs;
}
static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
{
    /*
     * Given the 4 bits specifying the outer or inner cacheability
     * in MAIR format, return a value specifying Normal Write-Back,
     * with the allocation and transient hints taken from the input
     * if the input specified some kind of cacheable attribute.
     */
    if (attr == 0 || attr == 4) {
        /*
         * 0 == an UNPREDICTABLE encoding
         * 4 == Non-cacheable
         * Either way, force Write-Back RW allocate non-transient
         */
        return 0xf;
    }
    /* Change WriteThrough to WriteBack, keep allocation and transient hints */
    return attr | 0x4;
}
/*
 * Combine the memory type and cacheability attributes of
 * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
 * combined attributes in MAIR_EL1 format.
 */
static uint8_t combined_attrs_fwb(CPUARMState *env,
                                  ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    switch (s2.attrs) {
    case 7:
        /* Use stage 1 attributes */
        return s1.attrs;
    case 6:
        /*
         * Force Normal Write-Back. Note that if S1 is Normal cacheable
         * then we take the allocation hints from it; otherwise it is
         * RW allocate, non-transient.
         */
        if ((s1.attrs & 0xf0) == 0) {
            /* S1 is Device */
            return 0xff;
        }
        /* Need to check the Inner and Outer nibbles separately */
        return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
               force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
    case 5:
        /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
        if ((s1.attrs & 0xf0) == 0) {
            return s1.attrs;
        }
        return 0x44;
    case 0 ... 3:
        /* Force Device, of subtype specified by S2 */
        return s2.attrs << 2;
    default:
        /*
         * RESERVED values (including RES0 descriptor bit [5] being nonzero);
         * arbitrarily force Device.
         */
        return 0;
    }
}
/*
 * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @env: CPUARMState
 * @s1:  Attributes from stage 1 walk
 * @s2:  Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
                                        ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    ARMCacheAttrs ret;
    bool tagged = false;

    assert(s2.is_s2_format && !s1.is_s2_format);
    ret.is_s2_format = false;

    if (s1.attrs == 0xf0) {
        tagged = true;
        s1.attrs = 0xff;
    }

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (arm_hcr_el2_eff(env) & HCR_FWB) {
        ret.attrs = combined_attrs_fwb(env, s1, s2);
    } else {
        ret.attrs = combined_attrs_nofwb(env, s1, s2);
    }

    /*
     * Any location for which the resultant memory type is any
     * type of Device memory is always treated as Outer Shareable.
     * Any location for which the resultant memory type is Normal
     * Inner Non-cacheable, Outer Non-cacheable is always treated
     * as Outer Shareable.
     * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
     */
    if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
        ret.shareability = 2;
    }

    /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
    if (tagged && ret.attrs == 0xff) {
        ret.attrs = 0xf0;
    }

    return ret;
}
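/*
 * E.g. an S1 mapping of Normal Write-Back (0xff) combined with an S2
 * Device mapping yields Device attributes, and the result is then
 * forced to Outer Shareable by the rule above.
 */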
/**
 * get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);

    if (mmu_idx != s1_mmu_idx) {
        /*
         * Call ourselves recursively to do the stage 1 and then stage 2
         * translations if mmu_idx is a two-stage regime.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            bool ipa_secure;
            ARMCacheAttrs cacheattrs2 = {};
            ARMMMUIdx s2_mmu_idx;
            bool is_el0;

            ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
                                attrs, prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early. */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
                *phys_ptr = ipa;
                return ret;
            }

            ipa_secure = attrs->secure;
            if (arm_is_secure_below_el3(env)) {
                if (ipa_secure) {
                    attrs->secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
                } else {
                    attrs->secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
                }
            } else {
                assert(!ipa_secure);
            }

            s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
            is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;

            /* S1 is done. Now do S2 translation. */
            ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi, &cacheattrs2);
            fi->s2addr = ipa;

            /* Combine the S1 and S2 perms. */
            *prot &= s2_prot;

            /* If S2 fails, return early. */
            if (ret) {
                return ret;
            }

            /* Combine the S1 and S2 cache attributes. */
            if (arm_hcr_el2_eff(env) & HCR_DC) {
                /*
                 * HCR.DC forces the first stage attributes to
                 *  Normal Non-Shareable,
                 *  Inner Write-Back Read-Allocate Write-Allocate,
                 *  Outer Write-Back Read-Allocate Write-Allocate.
                 * Do not overwrite Tagged within attrs.
                 */
                if (cacheattrs->attrs != 0xf0) {
                    cacheattrs->attrs = 0xff;
                }
                cacheattrs->shareability = 0;
            }
            *cacheattrs = combine_cacheattrs(env, *cacheattrs, cacheattrs2);

            /* Check if IPA translates to secure or non-secure PA space. */
            if (arm_is_secure_below_el3(env)) {
                if (ipa_secure) {
                    attrs->secure =
                        !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
                } else {
                    attrs->secure =
                        !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
                        || (env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW)));
                }
            }
            return 0;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

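    /*
     * To recap the two-stage path above: the S1 walk yields an IPA and
     * the S2 walk maps that IPA to the final PA, with the resulting
     * permissions being the intersection of the two stages (e.g. S1
     * RWX combined with S2 read/execute-only yields read/execute-only).
     */
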
    /*
     * The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /*
     * Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

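    /*
     * FCSEIDR holds the process ID in bits [31:25], so the addition
     * above relocates a VA in the low 32MB into the 32MB slot selected
     * by the PID: e.g. PID 3 turns VA 0x1000 into MVA 0x06001000.
     */
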
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        uint64_t hcr;
        uint8_t memattr;

        /*
         * MMU disabled. S1 addresses within aa64 translation regimes are
         * still checked for bounds -- see AArch64.TranslateAddressS1Off.
         */
        if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
            int r_el = regime_el(env, mmu_idx);
            if (arm_el_is_aa64(env, r_el)) {
                int pamax = arm_pamax(env_archcpu(env));
                uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
                int addrtop, tbi;

                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
                if (access_type == MMU_INST_FETCH) {
                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
                }
                tbi = (tbi >> extract64(address, 55, 1)) & 1;
                addrtop = (tbi ? 55 : 63);

                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
                    fi->type = ARMFault_AddressSize;
                    fi->level = 0;
                    fi->stage2 = false;
                    return 1;
                }

                /*
                 * When TBI is disabled, we've just validated that all of the
                 * bits above PAMax are zero, so logically we only need to
                 * clear the top byte for TBI. But it's clearer to follow
                 * the pseudocode set of addrdesc.paddress.
                 */
                address = extract64(address, 0, 52);
            }
        }
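        /*
         * Translation is off, so the VA now maps flat onto the PA with
         * full RWX permission; e.g. with PAMax == 48 and TBI disabled,
         * any address with bits [63:48] set was rejected above with an
         * AddressSize fault.
         */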
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;

        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
        hcr = arm_hcr_el2_eff(env);
        cacheattrs->shareability = 0;
        cacheattrs->is_s2_format = false;
        if (hcr & HCR_DC) {
            if (hcr & HCR_DCT) {
                memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
            } else {
                memattr = 0xff;  /* Normal, WB, RWA */
            }
        } else if (access_type == MMU_INST_FETCH) {
            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
                memattr = 0xee;  /* Normal, WT, RA, NT */
            } else {
                memattr = 0x44;  /* Normal, NC, No */
            }
            cacheattrs->shareability = 2; /* outer shareable */
        } else {
            memattr = 0x00;      /* Device, nGnRnE */
        }
        cacheattrs->attrs = memattr;
        return 0;
    }

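    /*
     * Translation is enabled, so dispatch on the descriptor format:
     * long-descriptor regimes use the LPAE walker, while for the
     * short-descriptor format SCTLR.XP selects the v6 behaviour over
     * the legacy v5 subpage behaviour.
     */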
    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}

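/*
 * Debug accessor (e.g. for the gdbstub): translate @addr as a data load
 * in the CPU's current translation regime and return the physical
 * address, or -1 if no valid mapping exists.
 */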
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    ARMCacheAttrs cacheattrs = {};

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, &cacheattrs);

    if (ret) {
        return -1;
    }
    return phys_addr;
}