/*
 * ARM page table walking.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/range.h"
#include "exec/exec-all.h"
#include "cpu.h"
#include "internals.h"
#include "idau.h"


typedef struct S1Translate {
    ARMMMUIdx in_mmu_idx;
    bool in_secure;
    bool in_debug;
    bool out_secure;
    bool out_be;
    hwaddr out_phys;
    void *out_host;
} S1Translate;

static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
                               uint64_t address,
                               MMUAccessType access_type, bool s1_is_el0,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
                                      target_ulong address,
                                      MMUAccessType access_type,
                                      GetPhysAddrResult *result,
                                      ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
static const uint8_t pamax_map[] = {
    [0] = 32,
    [1] = 36,
    [2] = 40,
    [3] = 42,
    [4] = 44,
    [5] = 48,
    [6] = 52,
};
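/*
 * For example, an encoding of 0 selects a 32-bit physical address space
 * and 5 selects 48 bits; the value 6 (FEAT_LPA) extends this to 52 bits.
 */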
/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
unsigned int arm_pamax(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        unsigned int parange =
            FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

        /*
         * id_aa64mmfr0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        assert(parange < ARRAY_SIZE(pamax_map));
        return pamax_map[parange];
    }

    /*
     * In machvirt_init, we call arm_pamax on a cpu that is not fully
     * initialized, so we can't rely on the propagation done in realize.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) ||
        arm_feature(&cpu->env, ARM_FEATURE_V7VE)) {
        /* v7 with LPAE */
        return 40;
    }
    /* Anything else */
    return 32;
}
/*
 * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
 */
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        return mmu_idx;
    }
}

ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
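/*
 * Illustrative mapping (derived from the switch above, not a quote of
 * any caller):
 *
 *     stage_1_mmu_idx(ARMMMUIdx_E10_1)  -> ARMMMUIdx_Stage1_E1
 *     stage_1_mmu_idx(ARMMMUIdx_E2)     -> ARMMMUIdx_E2 (unchanged)
 */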
static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

static bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}
/* Return the TTBR associated with this translation regime */
static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        return env->cp15.vsttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}
/* Return true if the specified stage of address translation is disabled */
static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
                                        bool is_secure)
{
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[is_secure] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /*
             * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    hcr_el2 = arm_hcr_el2_eff_secstate(env, is_secure);

    switch (mmu_idx) {
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
        /* HCR.DC means HCR.VM behaves as 1 */
        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;

    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
        if (hcr_el2 & HCR_TGE) {
            return true;
        }
        break;

    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        if (hcr_el2 & HCR_DC) {
            return true;
        }
        break;

    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E3:
        break;

    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_Phys_S:
        /* No translation for physical address spaces. */
        return true;

    default:
        g_assert_not_reached();
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
{
    /*
     * For an S1 page table walk, the stage 1 attributes are always
     * some form of "this is Normal memory". The combined S1+S2
     * attributes are therefore only Device if stage 2 specifies Device.
     * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
     * ie when cacheattrs.attrs bits [3:2] are 0b00.
     * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
     * when cacheattrs.attrs bit [2] is 0.
     */
    if (hcr & HCR_FWB) {
        return (attrs & 0x4) == 0;
    } else {
        return (attrs & 0xc) == 0;
    }
}
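/*
 * Worked example: with HCR_EL2.FWB == 0, S2 attrs 0b0011 (a Device type,
 * bits [3:2] == 0b00) is reported as Device, while 0b1111 (Normal
 * write-back) is not. With FWB == 1 only bit [2] is tested.
 */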
/* Translate a S1 pagetable walk through S2 if needed.  */
static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                             hwaddr addr, ARMMMUFaultInfo *fi)
{
    bool is_secure = ptw->in_secure;
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    ARMMMUIdx s2_mmu_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
    bool s2_phys = false;
    uint8_t pte_attrs;
    bool pte_secure;

    if (!arm_mmu_idx_is_stage1_of_2(mmu_idx)
        || regime_translation_disabled(env, s2_mmu_idx, is_secure)) {
        s2_mmu_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
        s2_phys = true;
    }

    if (unlikely(ptw->in_debug)) {
        /*
         * From gdbstub, do not use softmmu so that we don't modify the
         * state of the cpu at all, including softmmu tlb contents.
         */
        if (s2_phys) {
            ptw->out_phys = addr;
            pte_attrs = 0;
            pte_secure = is_secure;
        } else {
            S1Translate s2ptw = {
                .in_mmu_idx = s2_mmu_idx,
                .in_secure = is_secure,
                .in_debug = true,
            };
            GetPhysAddrResult s2 = { };
            if (!get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD,
                                    false, &s2, fi)) {
                goto fail;
            }
            ptw->out_phys = s2.f.phys_addr;
            pte_attrs = s2.cacheattrs.attrs;
            pte_secure = s2.f.attrs.secure;
        }
        ptw->out_host = NULL;
    } else {
        CPUTLBEntryFull *full;
        int flags;

        env->tlb_fi = fi;
        flags = probe_access_full(env, addr, MMU_DATA_LOAD,
                                  arm_to_core_mmu_idx(s2_mmu_idx),
                                  true, &ptw->out_host, &full, 0);
        env->tlb_fi = NULL;

        if (unlikely(flags & TLB_INVALID_MASK)) {
            goto fail;
        }
        ptw->out_phys = full->phys_addr;
        pte_attrs = full->pte_attrs;
        pte_secure = full->attrs.secure;
    }

    if (!s2_phys) {
        uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);

        if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
            /*
             * PTW set and S1 walk touched S2 Device memory:
             * generate Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !is_secure;
            return false;
        }
    }

    /* Check if page table walk is to secure or non-secure PA space. */
    ptw->out_secure = (is_secure
                       && !(pte_secure
                            ? env->cp15.vstcr_el2 & VSTCR_SW
                            : env->cp15.vtcr_el2 & VTCR_NSW));
    ptw->out_be = regime_translation_big_endian(env, mmu_idx);
    return true;

 fail:
    assert(fi->type != ARMFault_None);
    fi->s2addr = addr;
    fi->stage2 = true;
    fi->s1ptw = true;
    fi->s1ns = !is_secure;
    return false;
}
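/*
 * Note on the non-debug path above: when the stage 2 (or physical)
 * translation is already resident in the softmmu TLB, probe_access_full()
 * hands back a host pointer in ptw->out_host, so the descriptor loads
 * below turn into plain host memory loads; only TLB misses and MMIO
 * page tables take the slower address_space_ld*() route.
 */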
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw, hwaddr addr,
                            ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    uint32_t data;

    if (!S1_ptw_translate(env, ptw, addr, fi)) {
        /* Failure. */
        assert(fi->s1ptw);
        return 0;
    }

    if (likely(ptw->out_host)) {
        /* Page tables are in RAM, and we have the host address. */
        if (ptw->out_be) {
            data = ldl_be_p(ptw->out_host);
        } else {
            data = ldl_le_p(ptw->out_host);
        }
    } else {
        /* Page tables are in MMIO. */
        MemTxAttrs attrs = { .secure = ptw->out_secure };
        AddressSpace *as = arm_addressspace(cs, attrs);
        MemTxResult result = MEMTX_OK;

        if (ptw->out_be) {
            data = address_space_ldl_be(as, ptw->out_phys, attrs, &result);
        } else {
            data = address_space_ldl_le(as, ptw->out_phys, attrs, &result);
        }
        if (unlikely(result != MEMTX_OK)) {
            fi->type = ARMFault_SyncExternalOnWalk;
            fi->ea = arm_extabort_type(result);
            return 0;
        }
    }
    return data;
}
static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw, hwaddr addr,
                            ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    uint64_t data;

    if (!S1_ptw_translate(env, ptw, addr, fi)) {
        /* Failure. */
        assert(fi->s1ptw);
        return 0;
    }

    if (likely(ptw->out_host)) {
        /* Page tables are in RAM, and we have the host address. */
        if (ptw->out_be) {
            data = ldq_be_p(ptw->out_host);
        } else {
            data = ldq_le_p(ptw->out_host);
        }
    } else {
        /* Page tables are in MMIO. */
        MemTxAttrs attrs = { .secure = ptw->out_secure };
        AddressSpace *as = arm_addressspace(cs, attrs);
        MemTxResult result = MEMTX_OK;

        if (ptw->out_be) {
            data = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
        } else {
            data = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
        }
        if (unlikely(result != MEMTX_OK)) {
            fi->type = ARMFault_SyncExternalOnWalk;
            fi->ea = arm_extabort_type(result);
            return 0;
        }
    }
    return data;
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int maskshift = extract32(tcr, 0, 3);
    uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    uint32_t base_mask;

    if (address & mask) {
        if (tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        base_mask = ~((uint32_t)0x3fffu >> maskshift);
        *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
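/*
 * Worked example: with TTBCR.N == 2, mask == 0xc0000000, so virtual
 * addresses below 0x40000000 are translated via TTBR0 and everything
 * else via TTBR1 (unless the respective PD0/PD1 bit disables the walk).
 */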
/*
 * Translate section/page access permissions to page R/W protection flags
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                         int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
/*
 * Translate section/page access permissions to page R/W protection flags.
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
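/*
 * The simple AP[2:1] encoding therefore decodes as:
 *   0 -> privileged read/write, no EL0 access
 *   1 -> read/write at any privilege level
 *   2 -> privileged read-only, no EL0 access
 *   3 -> read-only at any privilege level
 */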
static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
                             uint32_t address, MMUAccessType access_type,
                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, ptw, table, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, ptw->in_mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        result->f.lg_page_size = 20; /* 1MB */
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(env, ptw, table, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            result->f.lg_page_size = 16;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            result->f.lg_page_size = 12;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    result->f.lg_page_size = 12;
                } else {
                    /*
                     * UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                result->f.lg_page_size = 10;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            g_assert_not_reached();
        }
    }
    result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot);
    result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
    if (!(result->f.prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    result->f.phys_addr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
                             uint32_t address, MMUAccessType access_type,
                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, ptw, table, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            result->f.lg_page_size = 24;  /* 16MB */
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            result->f.lg_page_size = 20;  /* 1MB */
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(env, ptw, table, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            result->f.lg_page_size = 16;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            result->f.lg_page_size = 12;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            g_assert_not_reached();
        }
    }
    if (domain_prot == 3) {
        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (result->f.prot && !xn) {
            result->f.prot |= PAGE_EXEC;
        }
        if (!(result->f.prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        result->f.attrs.secure = false;
    }
    result->f.phys_addr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/*
 * Translate S2 section/page access permissions to protection flags
 * @env:       CPUARMState
 * @s2ap:      The 2-bit stage2 access permissions (S2AP)
 * @xn:        XN (execute-never) bits
 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }

    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
        switch (xn) {
        case 0:
            prot |= PAGE_EXEC;
            break;
        case 1:
            if (s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        case 2:
            break;
        case 3:
            if (!s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        if (!extract32(xn, 1, 1)) {
            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
                prot |= PAGE_EXEC;
            }
        }
    }
    return prot;
}
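/*
 * With FEAT_XNX (TTS2UXN) the stage 2 XN field is two bits, decoded above
 * as: 0 -> executable, 1 -> executable only for EL0 accesses, 2 -> never
 * executable, 3 -> executable only for EL1 accesses.
 */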
/*
 * Translate section/page access permissions to protection flags
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_Stage2);
    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            /* PAN forbids data accesses but doesn't affect insn fetch */
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
        case 0:
        default:
            g_assert_not_reached();
        }
    } else {
        xn = xn || !(prot_rw & PAGE_READ);
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
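/*
 * Worked example: with SCTLR.WXN set, a page mapped with AP[2:1] == 1
 * (read/write at any level) has prot_rw & PAGE_WRITE set, so the wxn
 * term above suppresses PAGE_EXEC and the page is never executable.
 */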
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
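/*
 * Worked example: with T1SZ == 0 and T0SZ == 2, the TTBR0 region covers
 * VAs up to 0x3fffffff (0xffffffffu >> 2), so a VA of 0x80000000 selects
 * the TTBR1 region (select == 1).
 */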
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride, int outputsize)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /*
     * Negative levels are usually not allowed...
     * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
     * begins with level -1. Note that previous feature tests will have
     * eliminated this combination if it is not enabled.
     */
    if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && outputsize <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && outputsize <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && outputsize <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > outputsize &&
            (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
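/*
 * Worked example: for a 4KB granule (stride == 9, grainsize == 12) with
 * inputsize == 40 and a suggested starting level of 1, startsizecheck is
 * 40 - (2 * 9 + 12) == 10, which lies within [1, stride + 4], so the
 * suggested parameters are accepted.
 */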
/**
 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr,
 * attrs, prot and page_size may not be filled in, and the populated fsr
 * value provides information on why the translation aborted, in the format
 * of a long-format DFSR/IFSR fault register, with the following caveat:
 * the WnR bit is never set (the caller must do this).
 *
 * @env: CPUARMState
 * @ptw: Current and next stage parameters for the walk.
 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @s1_is_el0: if @ptw->in_mmu_idx is ARMMMUIdx_Stage2
 *             (so this is a stage 2 page table walk),
 *             must be true if this is stage 2 of a stage 1+2
 *             walk for an EL0 access. If @mmu_idx is anything else,
 *             @s1_is_el0 is ignored.
 * @result: set on translation success,
 * @fi: set to fault info if the translation fails
 */
static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
                               uint64_t address,
                               MMUAccessType access_type, bool s1_is_el0,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    bool is_secure = ptw->in_secure;
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize, outputsize;
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    bool guarded = false;

    /* TODO: This code does not support shareability levels. */
    if (aarch64) {
        int ps;

        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;

        /*
         * If TxSZ is programmed to a value larger than the maximum,
         * or smaller than the effective minimum, it is IMPLEMENTATION
         * DEFINED whether we behave as if the field were programmed
         * within bounds, or if a level 0 Translation fault is generated.
         *
         * With FEAT_LVA, fault on less than minimum becomes required,
         * so our choice is to always raise the fault.
         */
        if (param.tsz_oob) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }

        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;

        /*
         * Bound PS by PARANGE to find the effective output address size.
         * ID_AA64MMFR0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
        ps = MIN(ps, param.ps);
        assert(ps < ARRAY_SIZE(pamax_map));
        outputsize = pamax_map[ps];
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
        inputsize = addrsize - param.tsz;
        outputsize = 40;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    stride = arm_granule_bits(param.gran) - 3;

    /*
     * Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /*
     * Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /*
         * Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
        /*
         * The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
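        /*
         * Worked example: inputsize == 48 with a 4KB granule (stride 9)
         * gives level = 4 - 44 / 9 = 0, while a 64KB granule (stride 13)
         * gives level = 4 - 44 / 13 = 1.
         */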
    } else {
        /*
         * For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr, 6, 2);
        uint32_t sl2 = extract64(tcr, 33, 1);
        uint32_t startlevel;
        bool ok;

        /* SL2 is RES0 unless DS=1 & 4kb granule.  */
        if (param.ds && stride == 9 && sl2) {
            if (sl0 != 0) {
                level = 0;
                fault_type = ARMFault_Translation;
                goto do_fault;
            }
            startlevel = -1;
        } else if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;

            if (cpu_isar_feature(aa64_st, cpu)) {
                startlevel &= 3;
            }
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride, outputsize);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
    indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);

    /*
     * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
     *
     * Otherwise, if the base address is out of range, raise AddressSizeFault.
     * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
     * but we've just cleared the bits above 47, so simplify the test.
     */
    if (outputsize > 48) {
        descaddr |= extract64(ttbr, 2, 4) << 48;
    } else if (descaddr >> outputsize) {
        level = 0;
        fault_type = ARMFault_AddressSize;
        goto do_fault;
    }

    /*
     * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
     * and also to mask out CnP (bit 0) which could validly be non-zero.
     */
    descaddr &= ~indexmask;

    /*
     * For AArch32, the address field in the descriptor goes up to bit 39
     * for both v7 and v8.  However, for v8 the SBZ bits [47:40] must be 0
     * or an AddressSize fault is raised.  So for v8 we extract those SBZ
     * bits as part of the address, which will be checked via outputsize.
     * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
     * the highest bits of a 52-bit output are placed elsewhere.
     */
    if (param.ds) {
        descaddrmask = MAKE_64BIT_MASK(0, 50);
    } else if (arm_feature(env, ARM_FEATURE_V8)) {
        descaddrmask = MAKE_64BIT_MASK(0, 48);
    } else {
        descaddrmask = MAKE_64BIT_MASK(0, 40);
    }
    descaddrmask &= ~indexmask_grainsize;

    /*
     * Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = is_secure ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        ptw->in_secure = !nstable;
        descriptor = arm_ldq_ptw(env, ptw, descaddr, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }

        descaddr = descriptor & descaddrmask;

        /*
         * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
         * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
         * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
         * raise AddressSizeFault.
         */
        if (outputsize > 48) {
            if (param.ds) {
                descaddr |= extract64(descriptor, 8, 2) << 50;
            } else {
                descaddr |= extract64(descriptor, 12, 4) << 48;
            }
        } else if (descaddr >> outputsize) {
            fault_type = ARMFault_AddressSize;
            goto do_fault;
        }

        if ((descriptor & 2) && (level < 3)) {
            /*
             * Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /*
         * Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies. Note that although
         * descaddrmask masks enough of the low bits of the descriptor
         * to give a correct page or table address, the address field
         * in a block descriptor is smaller; so we need to explicitly
         * clear the lower bits here before ORing in the low vaddr bits.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr &= ~(hwaddr)(page_size - 1);
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1);  /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable.  */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /*
         * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }

    /*
     * Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        ns = mmu_idx == ARMMMUIdx_Stage2;
        xn = extract32(attrs, 11, 2);
        result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
    } else {
        ns = extract32(attrs, 3, 1);
        xn = extract32(attrs, 12, 1);
        pxn = extract32(attrs, 11, 1);
        result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(result->f.prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /*
         * The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        result->f.attrs.secure = false;
    }

    /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
    if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
        result->f.guarded = guarded;
    }

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        result->cacheattrs.is_s2_format = true;
        result->cacheattrs.attrs = extract32(attrs, 0, 4);
    } else {
        /* Index into MAIR registers for cache attributes */
        uint8_t attrindx = extract32(attrs, 0, 3);
        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
        assert(attrindx <= 7);
        result->cacheattrs.is_s2_format = false;
        result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
    }

    /*
     * For FEAT_LPA2 and effective DS, the SH field in the attributes
     * was re-purposed for output address bits.  The SH attribute in
     * that case comes from TCR_ELx, which we extracted earlier.
     */
    if (param.ds) {
        result->cacheattrs.shareability = param.sh;
    } else {
        result->cacheattrs.shareability = extract32(attrs, 6, 2);
    }

    result->f.phys_addr = descaddr;
    result->f.lg_page_size = ctz64(page_size);
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
                               mmu_idx == ARMMMUIdx_Stage2_S);
    fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
    return true;
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 bool is_secure, GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx, is_secure)) {
        /* MPU disabled.  */
        result->f.phys_addr = address;
        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    result->f.phys_addr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        result->f.prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        result->f.prot = PAGE_READ;
        if (!is_user) {
            result->f.prot |= PAGE_WRITE;
        }
        break;
    case 3:
        result->f.prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        result->f.prot = PAGE_READ;
        break;
    case 6:
        result->f.prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    result->f.prot |= PAGE_EXEC;
    return false;
}
static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
                                         int32_t address, uint8_t *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /*
     * True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}

static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                                         bool is_secure, bool is_user)
{
    /*
     * Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 bool secure, GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    result->f.phys_addr = address;
    result->f.lg_page_size = TARGET_PAGE_BITS;
    result->f.prot = 0;

    if (regime_translation_disabled(env, mmu_idx, secure) ||
        m_is_ppb_region(env, address)) {
        /*
         * MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    result->f.lg_page_size = 0;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /*
                     * This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                result->f.lg_page_size = rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address,
                                         &result->f.prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    result->f.prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    result->f.prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        result->f.prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    result->f.prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    result->f.prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        result->f.prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                result->f.prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(result->f.prot & (1 << access_type));
}
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /*
     * Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * If the region hit doesn't cover the entire TARGET_PAGE the address
     * is within, then we set the result page_size to 1 to force the
     * memory system to use a subpage.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    result->f.lg_page_size = TARGET_PAGE_BITS;
    result->f.phys_addr = address;
    result->f.prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /*
     * Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx, secure)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /*
             * Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    result->f.lg_page_size = 0;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                result->f.lg_page_size = 0;
            }

            if (matchregion != -1) {
                /*
                 * Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
        bool pxn = false;

        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
            pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
        }

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (result->f.prot && !xn && !(pxn && !is_user)) {
            result->f.prot |= PAGE_EXEC;
        }
        /*
         * We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(result->f.prot & (1 << access_type));
}
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /*
     * The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
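/*
 * These ranges roughly correspond to the ITM/DWT/FPB, the System Control
 * Space and its non-secure alias, the TPIU/ETM area, and the ROM table,
 * which the v8M architecture defines as exempt from attribution checks.
 */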
void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool is_secure, V8M_SAttributes *sattrs)
{
    /*
     * Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !is_secure;
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /*
                         * If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 bool secure, GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    V8M_SAttributes sattrs = {};
    bool ret;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx,
                            secure, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /*
             * Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
                result->f.phys_addr = address;
                result->f.prot = 0;
                return true;
            }
        } else {
            /*
             * For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                result->f.attrs.secure = false;
            } else if (!secure) {
                /*
                 * NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
                result->f.phys_addr = address;
                result->f.prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
                            result, fi, NULL);
    if (sattrs.subpage) {
        result->f.lg_page_size = 0;
    }
    return ret;
}
/*
 * Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if (hcr & HCR_CD) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
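/*
 * Worked example: s2attrs == 0xf (Normal, write-back inner and outer)
 * with HCR_EL2.CD clear converts to MAIR format 0xff, i.e. write-back
 * with RW-allocate hints in both nibbles.
 */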
/*
 * Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
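/*
 * Worked example: s1 == 0xf (write-back, RW allocate) combined with a
 * write-through stage 2 nibble yields (2 << 2) | 3 == 0xb: the stage 2
 * write-through type wins, but the stage 1 allocation hints survive.
 */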
/*
 * Combine the memory type and cacheability attributes of
 * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
 * combined attributes in MAIR_EL1 format.
 */
static uint8_t combined_attrs_nofwb(uint64_t hcr,
                                    ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;

    s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);

    s1lo = extract32(s1.attrs, 0, 4);
    s2lo = extract32(s2_mair_attrs, 0, 4);
    s1hi = extract32(s1.attrs, 4, 4);
    s2hi = extract32(s2_mair_attrs, 4, 4);

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret_attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret_attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret_attrs = 8;  /* nGRE */
        } else {
            ret_attrs = 0xc; /* GRE */
        }
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);
    }
    return ret_attrs;
}
)
2236 * Given the 4 bits specifying the outer or inner cacheability
2237 * in MAIR format, return a value specifying Normal Write-Back,
2238 * with the allocation and transient hints taken from the input
2239 * if the input specified some kind of cacheable attribute.
2241 if (attr
== 0 || attr
== 4) {
2243 * 0 == an UNPREDICTABLE encoding
2244 * 4 == Non-cacheable
2245 * Either way, force Write-Back RW allocate non-transient
2249 /* Change WriteThrough to WriteBack, keep allocation and transient hints */
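/*
 * Worked example: a write-through input such as 0xa becomes 0xe
 * (write-back with the same hints), while 0x0 or 0x4 is forced to 0xf
 * (write-back, RW allocate, non-transient).
 */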
2254 * Combine the memory type and cacheability attributes of
2255 * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
2256 * combined attributes in MAIR_EL1 format.
2258 static uint8_t combined_attrs_fwb(ARMCacheAttrs s1
, ARMCacheAttrs s2
)
    switch (s2.attrs) {
    case 7:
        /* Use stage 1 attributes */
        return s1.attrs;
    case 6:
        /*
         * Force Normal Write-Back. Note that if S1 is Normal cacheable
         * then we take the allocation hints from it; otherwise it is
         * RW allocate, non-transient.
         */
        if ((s1.attrs & 0xf0) == 0) {
            /* S1 is Device */
            return 0xff;
        }
        /* Need to check the Inner and Outer nibbles separately */
        return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
               force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
    case 5:
        /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
        if ((s1.attrs & 0xf0) == 0) {
            return s1.attrs;
        }
        return 0x44;
    case 0 ... 3: /* Device */
        /* Force Device, of subtype specified by S2 */
        return s2.attrs << 2;
    default:
        /*
         * RESERVED values (including RES0 descriptor bit [5] being nonzero);
         * arbitrarily force Device.
         */
        return 0;
    }
}
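/*
 * Summary of the FWB stage 2 encoding handled above (illustrative, not
 * from the original source): s2.attrs 0..3 select a Device subtype
 * (after the << 2: 0x0 nGnRnE, 0x4 nGnRE, 0x8 nGRE, 0xc GRE), 5 means
 * Normal Non-cacheable, 6 forces Normal Write-Back, 7 passes the stage 1
 * attributes through, and the remaining reserved values collapse to
 * Device-nGnRnE.
 */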
/*
 * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
                                        ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    ARMCacheAttrs ret;
    bool tagged = false;

    assert(s2.is_s2_format && !s1.is_s2_format);
    ret.is_s2_format = false;

    if (s1.attrs == 0xf0) {
        tagged = true;
        s1.attrs = 0xff;
    }

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (hcr & HCR_FWB) {
        ret.attrs = combined_attrs_fwb(s1, s2);
    } else {
        ret.attrs = combined_attrs_nofwb(hcr, s1, s2);
    }

    /*
     * Any location for which the resultant memory type is any
     * type of Device memory is always treated as Outer Shareable.
     * Any location for which the resultant memory type is Normal
     * Inner Non-cacheable, Outer Non-cacheable is always treated
     * as Outer Shareable.
     * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
     */
    if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
        ret.shareability = 2;
    }

    /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
    if (tagged && ret.attrs == 0xff) {
        ret.attrs = 0xf0;
    }

    return ret;
}
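/*
 * Worked example (illustrative, not from the original source): a Tagged
 * Normal stage 1 mapping (attrs 0xf0) is treated as 0xff for the
 * combination itself; if the combined result is still 0xff, the Tagged
 * attribute is restored at the end, so a permissive stage 2 does not
 * silently strip MTE tagging, while a more restrictive stage 2 result
 * (e.g. Non-cacheable) remains in force.
 */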
/*
 * MMU disabled.  S1 addresses within aa64 translation regimes are
 * still checked for bounds -- see AArch64.S1DisabledOutput().
 */
static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
                                   MMUAccessType access_type,
                                   ARMMMUIdx mmu_idx, bool is_secure,
                                   GetPhysAddrResult *result,
                                   ARMMMUFaultInfo *fi)
{
    uint8_t memattr = 0x00;    /* Device nGnRnE */
    uint8_t shareability = 0;  /* non-shareable */
    int r_el;

    switch (mmu_idx) {
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_Phys_S:
        break;

    default:
        r_el = regime_el(env, mmu_idx);
        if (arm_el_is_aa64(env, r_el)) {
            int pamax = arm_pamax(env_archcpu(env));
            uint64_t tcr = env->cp15.tcr_el[r_el];
            int addrtop, tbi;

            tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
            if (access_type == MMU_INST_FETCH) {
                tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
            }
            tbi = (tbi >> extract64(address, 55, 1)) & 1;
            addrtop = (tbi ? 55 : 63);

            if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
                fi->type = ARMFault_AddressSize;
                fi->level = 0;
                fi->stage2 = false;
                return 1;
            }

            /*
             * When TBI is disabled, we've just validated that all of the
             * bits above PAMax are zero, so logically we only need to
             * clear the top byte for TBI.  But it's clearer to follow
             * the pseudocode set of addrdesc.paddress.
             */
            address = extract64(address, 0, 52);
        }

        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
        if (r_el == 1) {
            uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
            if (hcr & HCR_DC) {
                if (hcr & HCR_DCT) {
                    memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
                } else {
                    memattr = 0xff;  /* Normal, WB, RWA */
                }
            }
        }
        if (memattr == 0 && access_type == MMU_INST_FETCH) {
            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
                memattr = 0xee;  /* Normal, WT, RA, NT */
            } else {
                memattr = 0x44;  /* Normal, NC, No */
            }
            shareability = 2; /* outer shareable */
        }
        result->cacheattrs.is_s2_format = false;
        break;
    }

    result->f.phys_addr = address;
    result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    result->f.lg_page_size = TARGET_PAGE_BITS;
    result->cacheattrs.shareability = shareability;
    result->cacheattrs.attrs = memattr;
    return false;
}
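/*
 * Worked example (illustrative, not from the original source): with a
 * 40-bit PAMax and TBI disabled, addrtop is 63, so a load from
 * 0x0000_0100_0000_0000 (bit 40 set) fails the extract64() bounds check
 * above and reports ARMFault_AddressSize even though the MMU is off and
 * no table walk takes place.
 */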
static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
                                   target_ulong address,
                                   MMUAccessType access_type,
                                   GetPhysAddrResult *result,
                                   ARMMMUFaultInfo *fi)
{
    hwaddr ipa;
    int s1_prot;
    bool is_secure = ptw->in_secure;
    bool ret, ipa_secure, s2walk_secure;
    ARMCacheAttrs cacheattrs1;
    bool is_el0;
    uint64_t hcr;

    ret = get_phys_addr_with_struct(env, ptw, address, access_type, result, fi);

    /* If S1 fails or S2 is disabled, return early.  */
    if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) {
        return ret;
    }

    ipa = result->f.phys_addr;
    ipa_secure = result->f.attrs.secure;
    if (is_secure) {
        /* Select TCR based on the NS bit from the S1 walk. */
        s2walk_secure = !(ipa_secure
                          ? env->cp15.vstcr_el2 & VSTCR_SW
                          : env->cp15.vtcr_el2 & VTCR_NSW);
    } else {
        assert(!ipa_secure);
        s2walk_secure = false;
    }

    is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
    ptw->in_mmu_idx = s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
    ptw->in_secure = s2walk_secure;

    /*
     * S1 is done, now do S2 translation.
     * Save the stage1 results so that we may merge prot and cacheattrs later.
     */
    s1_prot = result->f.prot;
    cacheattrs1 = result->cacheattrs;
    memset(result, 0, sizeof(*result));

    ret = get_phys_addr_lpae(env, ptw, ipa, access_type, is_el0, result, fi);
    fi->s2addr = ipa;

    /* Combine the S1 and S2 perms.  */
    result->f.prot &= s1_prot;

    /* If S2 fails, return early.  */
    if (ret) {
        return ret;
    }

    /* Combine the S1 and S2 cache attributes. */
    hcr = arm_hcr_el2_eff_secstate(env, is_secure);
    if (hcr & HCR_DC) {
        /*
         * HCR.DC forces the first stage attributes to
         *  Normal Non-Shareable,
         *  Inner Write-Back Read-Allocate Write-Allocate,
         *  Outer Write-Back Read-Allocate Write-Allocate.
         * Do not overwrite Tagged within attrs.
         */
        if (cacheattrs1.attrs != 0xf0) {
            cacheattrs1.attrs = 0xff;
        }
        cacheattrs1.shareability = 0;
    }
    result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
                                            result->cacheattrs);

    /*
     * Check if IPA translates to secure or non-secure PA space.
     * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
     */
    result->f.attrs.secure =
        (is_secure
         && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
         && (ipa_secure
             || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));

    return false;
}
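/*
 * Reading of the final expression above (illustrative, not from the
 * original source): the output PA is Secure only when the regime itself
 * is Secure, neither VSTCR_EL2.SA nor VSTCR_EL2.SW is set, and either
 * the IPA was Secure or neither VTCR_EL2.NSA nor VTCR_EL2.NSW is set;
 * every other combination downgrades the output to Non-secure.
 */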
static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
                                      target_ulong address,
                                      MMUAccessType access_type,
                                      GetPhysAddrResult *result,
                                      ARMMMUFaultInfo *fi)
{
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
    bool is_secure = ptw->in_secure;

    if (mmu_idx != s1_mmu_idx) {
        /*
         * Call ourselves recursively to do the stage 1 and then stage 2
         * translations if mmu_idx is a two-stage regime, and EL2 present.
         * Otherwise, a stage1+stage2 translation is just stage 1.
         */
        ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            return get_phys_addr_twostage(env, ptw, address, access_type,
                                          result, fi);
        }
    }

    /*
     * The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    result->f.attrs.secure = is_secure;
    result->f.attrs.user = regime_is_user(env, mmu_idx);

    /*
     * Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        result->f.lg_page_size = TARGET_PAGE_BITS;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       is_secure, result, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       is_secure, result, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       is_secure, result, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      result->f.prot & PAGE_READ ? 'r' : '-',
                      result->f.prot & PAGE_WRITE ? 'w' : '-',
                      result->f.prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx, is_secure)) {
        return get_phys_addr_disabled(env, address, access_type, mmu_idx,
                                      is_secure, result, fi);
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, ptw, address, access_type, false,
                                  result, fi);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
    } else {
        return get_phys_addr_v5(env, ptw, address, access_type, result, fi);
    }
}
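/*
 * Dispatch summary (illustrative, not from the original source): an MPU
 * (PMSA) system is handled entirely by the pmsav5/v7/v8 lookups above,
 * while a VMSA system picks the long-descriptor (LPAE) walker when the
 * regime uses that format, the VMSAv6/v7 short-descriptor walker when
 * SCTLR.XP is set, and the legacy ARMv4/v5 walker otherwise.
 */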
bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               bool is_secure, GetPhysAddrResult *result,
                               ARMMMUFaultInfo *fi)
{
    S1Translate ptw = {
        .in_mmu_idx = mmu_idx,
        .in_secure = is_secure,
    };
    return get_phys_addr_with_struct(env, &ptw, address, access_type,
                                     result, fi);
}
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    bool is_secure;

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
        is_secure = arm_is_secure_below_el3(env);
        break;
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        is_secure = false;
        break;
    case ARMMMUIdx_E3:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_Phys_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        is_secure = true;
        break;
    default:
        g_assert_not_reached();
    }
    return get_phys_addr_with_secure(env, address, access_type, mmu_idx,
                                     is_secure, result, fi);
}
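/*
 * Usage sketch (illustrative, not from the original source; assumes the
 * caller starts from a core mmu index and uses core_to_arm_mmu_idx()):
 *
 *     GetPhysAddrResult res = {};
 *     ARMMMUFaultInfo fi = {};
 *
 *     if (!get_phys_addr(env, addr, access_type,
 *                        core_to_arm_mmu_idx(env, mmu_idx), &res, &fi)) {
 *         use res.f.phys_addr, res.f.prot and res.f.attrs
 *     } else {
 *         report the fault described by fi
 *     }
 *
 * A false return means success; fault details come back through fi
 * rather than being raised directly.
 */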
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    S1Translate ptw = {
        .in_mmu_idx = arm_mmu_idx(env),
        .in_secure = arm_is_secure(env),
        .in_debug = true,
    };
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    bool ret;

    ret = get_phys_addr_with_struct(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
    *attrs = res.f.attrs;

    if (ret) {
        return -1;
    }
    return res.f.phys_addr;
}
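/*
 * Note (illustrative, not from the original source): this is the debug
 * accessor reached via cpu_get_phys_page_attrs_debug(), e.g. for gdbstub
 * memory reads, which is why it walks with in_debug set, uses a
 * MMU_DATA_LOAD access type, and returns -1 instead of raising a fault
 * when the walk fails.
 */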