/*
 * ARM page table walking.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/range.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "internals.h"
typedef struct S1Translate {
    ARMMMUIdx in_mmu_idx;
    ARMMMUIdx in_ptw_idx;
    bool in_secure;
    bool in_debug;
    bool out_secure;
    bool out_rw;
    bool out_be;
    hwaddr out_virt;
    hwaddr out_phys;
    void *out_host;
} S1Translate;

static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
                               uint64_t address,
                               MMUAccessType access_type, bool s1_is_el0,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
                                      target_ulong address,
                                      MMUAccessType access_type,
                                      GetPhysAddrResult *result,
                                      ARMMMUFaultInfo *fi)
    __attribute__((nonnull));
/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
static const uint8_t pamax_map[] = {
    [0] = 32,
    [1] = 36,
    [2] = 40,
    [3] = 42,
    [4] = 44,
    [5] = 48,
    [6] = 52,
};
/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
unsigned int arm_pamax(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        unsigned int parange =
            FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

        /*
         * id_aa64mmfr0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        assert(parange < ARRAY_SIZE(pamax_map));
        return pamax_map[parange];
    }

    /*
     * In machvirt_init, we call arm_pamax on a cpu that is not fully
     * initialized, so we can't rely on the propagation done in realize.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) ||
        arm_feature(&cpu->env, ARM_FEATURE_V7VE)) {
        /* v7 with extended physical addressing */
        return 40;
    }

    return 32;
}
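/*
 * Worked example of the mapping above (assuming the architectural PARange
 * encodings in pamax_map): a CPU whose ID_AA64MMFR0.PARANGE field reads
 * 0b0101 indexes pamax_map[5] and therefore reports a 48-bit physical
 * address size; 0b0000 gives the architectural minimum of 32 bits.
 */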
/*
 * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
 */
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        return mmu_idx;
    }
}

ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}
/* Return the TTBR associated with this translation regime */
static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        return env->cp15.vsttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}
/* Return true if the specified stage of address translation is disabled */
static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
                                        bool is_secure)
{
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[is_secure] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /*
             * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    hcr_el2 = arm_hcr_el2_eff_secstate(env, is_secure);

    switch (mmu_idx) {
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
        /* HCR.DC means HCR.VM behaves as 1 */
        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;

    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
        if (hcr_el2 & HCR_TGE) {
            return true;
        }
        break;

    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        if (hcr_el2 & HCR_DC) {
            return true;
        }
        break;

    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        break;

    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_Phys_S:
        /* No translation for physical address spaces. */
        return true;

    default:
        g_assert_not_reached();
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
{
    /*
     * For an S1 page table walk, the stage 1 attributes are always
     * some form of "this is Normal memory". The combined S1+S2
     * attributes are therefore only Device if stage 2 specifies Device.
     * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
     * ie when cacheattrs.attrs bits [3:2] are 0b00.
     * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
     * when cacheattrs.attrs bit [2] is 0.
     */
    if (hcr & HCR_FWB) {
        return (attrs & 0x4) == 0;
    } else {
        return (attrs & 0xc) == 0;
    }
}
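/*
 * Worked example of the checks above: with HCR_EL2.FWB == 0, a stage 2
 * attrs value of 0x1 (a Device type) has bits [3:2] == 0b00, so the
 * function returns true; 0xf (Normal Write-Back Cacheable) has
 * bits [3:2] == 0b11, so it returns false.
 */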
/* Translate a S1 pagetable walk through S2 if needed. */
static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                             hwaddr addr, ARMMMUFaultInfo *fi)
{
    bool is_secure = ptw->in_secure;
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
    uint8_t pte_attrs;
    bool pte_secure;

    ptw->out_virt = addr;

    if (unlikely(ptw->in_debug)) {
        /*
         * From gdbstub, do not use softmmu so that we don't modify the
         * state of the cpu at all, including softmmu tlb contents.
         */
        if (regime_is_stage2(s2_mmu_idx)) {
            S1Translate s2ptw = {
                .in_mmu_idx = s2_mmu_idx,
                .in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS,
                .in_secure = is_secure,
                .in_debug = true,
            };
            GetPhysAddrResult s2 = { };

            if (get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD,
                                   false, &s2, fi)) {
                goto fail;
            }
            ptw->out_phys = s2.f.phys_addr;
            pte_attrs = s2.cacheattrs.attrs;
            pte_secure = s2.f.attrs.secure;
        } else {
            /* Regime is physical. */
            ptw->out_phys = addr;
            pte_attrs = 0;
            pte_secure = is_secure;
        }
        ptw->out_host = NULL;
        ptw->out_rw = false;
    } else {
#ifdef CONFIG_TCG
        CPUTLBEntryFull *full;
        int flags;

        flags = probe_access_full(env, addr, 0, MMU_DATA_LOAD,
                                  arm_to_core_mmu_idx(s2_mmu_idx),
                                  true, &ptw->out_host, &full, 0);

        if (unlikely(flags & TLB_INVALID_MASK)) {
            goto fail;
        }
        ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
        ptw->out_rw = full->prot & PAGE_WRITE;
        pte_attrs = full->pte_attrs;
        pte_secure = full->attrs.secure;
#else
        g_assert_not_reached();
#endif
    }

    if (regime_is_stage2(s2_mmu_idx)) {
        uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);

        if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
            /*
             * PTW set and S1 walk touched S2 Device memory:
             * generate Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s1ns = !is_secure;
            return false;
        }
    }

    /* Check if page table walk is to secure or non-secure PA space. */
    ptw->out_secure = (is_secure
                       && !(pte_secure
                            ? env->cp15.vstcr_el2 & VSTCR_SW
                            : env->cp15.vtcr_el2 & VTCR_NSW));
    ptw->out_be = regime_translation_big_endian(env, mmu_idx);
    return true;

 fail:
    assert(fi->type != ARMFault_None);
    fi->s1ns = !is_secure;
    return false;
}
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw,
                            ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    void *host = ptw->out_host;
    uint32_t data;

    if (likely(host)) {
        /* Page tables are in RAM, and we have the host address. */
        data = qatomic_read((uint32_t *)host);
        if (ptw->out_be) {
            data = be32_to_cpu(data);
        } else {
            data = le32_to_cpu(data);
        }
    } else {
        /* Page tables are in MMIO. */
        MemTxAttrs attrs = { .secure = ptw->out_secure };
        AddressSpace *as = arm_addressspace(cs, attrs);
        MemTxResult result = MEMTX_OK;

        if (ptw->out_be) {
            data = address_space_ldl_be(as, ptw->out_phys, attrs, &result);
        } else {
            data = address_space_ldl_le(as, ptw->out_phys, attrs, &result);
        }
        if (unlikely(result != MEMTX_OK)) {
            fi->type = ARMFault_SyncExternalOnWalk;
            fi->ea = arm_extabort_type(result);
            return 0;
        }
    }
    return data;
}
static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw,
                            ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    void *host = ptw->out_host;
    uint64_t data;

    if (likely(host)) {
        /* Page tables are in RAM, and we have the host address. */
#ifdef CONFIG_ATOMIC64
        data = qatomic_read__nocheck((uint64_t *)host);
        if (ptw->out_be) {
            data = be64_to_cpu(data);
        } else {
            data = le64_to_cpu(data);
        }
#else
        if (ptw->out_be) {
            data = ldq_be_p(host);
        } else {
            data = ldq_le_p(host);
        }
#endif
    } else {
        /* Page tables are in MMIO. */
        MemTxAttrs attrs = { .secure = ptw->out_secure };
        AddressSpace *as = arm_addressspace(cs, attrs);
        MemTxResult result = MEMTX_OK;

        if (ptw->out_be) {
            data = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
        } else {
            data = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
        }
        if (unlikely(result != MEMTX_OK)) {
            fi->type = ARMFault_SyncExternalOnWalk;
            fi->ea = arm_extabort_type(result);
            return 0;
        }
    }
    return data;
}
static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
                             uint64_t new_val, S1Translate *ptw,
                             ARMMMUFaultInfo *fi)
{
    uint64_t cur_val;
    void *host = ptw->out_host;

    if (unlikely(!host)) {
        fi->type = ARMFault_UnsuppAtomicUpdate;
        return 0;
    }

    /*
     * Raising a stage2 Protection fault for an atomic update to a read-only
     * page is delayed until it is certain that there is a change to make.
     */
    if (unlikely(!ptw->out_rw)) {
        int flags;
        void *discard;

        flags = probe_access_flags(env, ptw->out_virt, 0, MMU_DATA_STORE,
                                   arm_to_core_mmu_idx(ptw->in_ptw_idx),
                                   true, &discard, 0);

        if (unlikely(flags & TLB_INVALID_MASK)) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = ptw->out_virt;
            fi->s1ns = !ptw->in_secure;
            return 0;
        }

        /* In case CAS mismatches and we loop, remember writability. */
        ptw->out_rw = true;
    }

#ifdef CONFIG_ATOMIC64
    if (ptw->out_be) {
        old_val = cpu_to_be64(old_val);
        new_val = cpu_to_be64(new_val);
        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
        cur_val = be64_to_cpu(cur_val);
    } else {
        old_val = cpu_to_le64(old_val);
        new_val = cpu_to_le64(new_val);
        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
        cur_val = le64_to_cpu(cur_val);
    }
#else
    /*
     * We can't support the full 64-bit atomic cmpxchg on the host.
     * Because this is only used for FEAT_HAFDBS, which is only for AA64,
     * we know that TCG_OVERSIZED_GUEST is set, which means that we are
     * running in round-robin mode and could only race with dma i/o.
     */
#ifndef TCG_OVERSIZED_GUEST
# error "Unexpected configuration"
#endif
    bool locked = qemu_mutex_iothread_locked();
    if (!locked) {
        qemu_mutex_lock_iothread();
    }
    if (ptw->out_be) {
        cur_val = ldq_be_p(host);
        if (cur_val == old_val) {
            stq_be_p(host, new_val);
        }
    } else {
        cur_val = ldq_le_p(host);
        if (cur_val == old_val) {
            stq_le_p(host, new_val);
        }
    }
    if (!locked) {
        qemu_mutex_unlock_iothread();
    }
#endif

    return cur_val;
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int maskshift = extract32(tcr, 0, 3);
    uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    uint32_t base_mask;

    if (address & mask) {
        if (tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        base_mask = ~((uint32_t)0x3fffu >> maskshift);
        *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
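/*
 * Worked example of the masks above: with TTBCR.N == 2, maskshift == 2 and
 * mask == 0xc0000000, so virtual addresses below 0x40000000 take the TTBR0
 * path while higher addresses are looked up via TTBR1.
 */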
/*
 * Translate section/page access permissions to page R/W protection flags
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 * @is_user:     TRUE if accessing from PL0
 */
static int ap_to_rw_prot_is_user(CPUARMState *env, ARMMMUIdx mmu_idx,
                                 int ap, int domain_prot, bool is_user)
{
    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved. */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
/*
 * Translate section/page access permissions to page R/W protection flags
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                         int ap, int domain_prot)
{
    return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot,
                                 regime_is_user(env, mmu_idx));
}
/*
 * Translate section/page access permissions to page R/W protection flags.
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
                             uint32_t address, MMUAccessType access_type,
                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    int type, domain, domain_prot, ap;
    uint32_t table, desc, dacr;
    hwaddr phys_addr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (!S1_ptw_translate(env, ptw, table, fi)) {
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, ptw, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = desc & 3;
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, ptw->in_mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault. */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        result->f.lg_page_size = 20; /* 1MB */
    } else {
        /* Lookup l2 entry. */
        if (type == 1) {
            /* Coarse pagetable. */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable. */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        if (!S1_ptw_translate(env, ptw, table, fi)) {
            goto do_fault;
        }
        desc = arm_ldl_ptw(env, ptw, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            result->f.lg_page_size = 16;
            break;
        case 2: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            result->f.lg_page_size = 12;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    result->f.lg_page_size = 12;
                } else {
                    /*
                     * UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                result->f.lg_page_size = 10;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            g_assert_not_reached();
        }
    }
    result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot);
    result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
    if (!(result->f.prot & (1 << access_type))) {
        /* Access permission fault. */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    result->f.phys_addr = phys_addr;
    return false;
 do_fault:
    return true;
}
static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
                             uint32_t address, MMUAccessType access_type,
                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    int type, domain = 0, domain_prot, ap, user_prot;
    uint32_t table, desc, dacr, xn, pxn = 0;
    bool ns;
    hwaddr phys_addr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (!S1_ptw_translate(env, ptw, table, fi)) {
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, ptw, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = desc & 3;
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section. */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            result->f.lg_page_size = 24;  /* 16MB */
        } else {
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            result->f.lg_page_size = 20;  /* 1MB */
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry. */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        if (!S1_ptw_translate(env, ptw, table, fi)) {
            goto do_fault;
        }
        desc = arm_ldl_ptw(env, ptw, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            result->f.lg_page_size = 16;
            break;
        case 2: case 3: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            result->f.lg_page_size = 12;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            g_assert_not_reached();
        }
    }
    if (domain_prot == 3) {
        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit. */
            if ((ap & 1) == 0) {
                /* Access flag fault. */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
            user_prot = simple_ap_to_rw_prot_is_user(ap >> 1, 1);
        } else {
            result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
            user_prot = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1);
        }
        if (result->f.prot && !xn) {
            result->f.prot |= PAGE_EXEC;
        }
        if (!(result->f.prot & (1 << access_type))) {
            /* Access permission fault. */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
        if (regime_is_pan(env, mmu_idx) &&
            !regime_is_user(env, mmu_idx) &&
            user_prot &&
            access_type != MMU_INST_FETCH) {
            /* Privileged Access Never fault */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        result->f.attrs.secure = false;
    }
    result->f.phys_addr = phys_addr;
    return false;
 do_fault:
    return true;
}
/*
 * Translate S2 section/page access permissions to protection flags
 * @s2ap:      The 2-bit stage2 access permissions (S2AP)
 * @xn:        XN (execute-never) bits
 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }

    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
        switch (xn) {
        case 0:
            prot |= PAGE_EXEC;
            break;
        case 1:
            if (s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        case 2:
            break;
        case 3:
            if (!s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        if (!extract32(xn, 1, 1)) {
            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
                prot |= PAGE_EXEC;
            }
        }
    }
    return prot;
}
/*
 * Translate section/page access permissions to protection flags
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(!regime_is_stage2(mmu_idx));

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        /*
         * PAN controls can forbid data accesses but don't affect insn fetch.
         * Plain PAN forbids data accesses if EL0 has data permissions;
         * PAN3 forbids data accesses if EL0 has either data or exec perms.
         * Note that for AArch64 the 'user can exec' case is exactly !xn.
         * We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0
         * do not affect EPAN.
         */
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            prot_rw = 0;
        } else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 &&
                   regime_is_pan(env, mmu_idx) &&
                   (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) {
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;

                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = xn || !(prot_rw & PAGE_READ);
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later. */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well. */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
/*
 * check_s2_mmu_setup
 * @is_aa64: True if the translation regime is in AArch64 state
 * @tcr:     VTCR_EL2 or VSTCR_EL2
 * @ds:      Effective value of TCR.DS.
 * @iasize:  Bitsize of IPAs
 * @stride:  Page-table stride (See the ARM ARM)
 *
 * Decode the starting level of the S2 lookup, returning INT_MIN if
 * the configuration is invalid.
 */
static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr,
                              bool ds, int iasize, int stride)
{
    int sl0, sl2, startlevel, granulebits, levels;
    int s1_min_iasize, s1_max_iasize;

    sl0 = extract32(tcr, 6, 2);

    if (is_aa64) {
        /*
         * AArch64.S2InvalidTxSZ: While we checked tsz_oob near the top of
         * get_phys_addr_lpae, that used aa64_va_parameters which apply
         * to aarch64. If Stage1 is aarch32, the min_txsz is larger.
         * See AArch64.S2MinTxSZ, where min_tsz is 24, translated to
         * inputsize is 64 - 24 = 40.
         */
        if (iasize < 40 && !arm_el_is_aa64(&cpu->env, 1)) {
            goto fail;
        }

        /*
         * AArch64.S2InvalidSL: Interpretation of SL depends on the page size,
         * so interleave AArch64.S2StartLevel.
         */
        switch (stride) {
        case 9: /* 4KB */
            /* SL2 is RES0 unless DS=1 & 4KB granule. */
            sl2 = extract64(tcr, 33, 1);
            if (ds && sl2) {
                if (sl0 != 0) {
                    goto fail;
                }
                startlevel = -1;
            } else {
                startlevel = 2 - sl0;
                switch (sl0) {
                case 2:
                    if (arm_pamax(cpu) < 44) {
                        goto fail;
                    }
                    break;
                case 3:
                    if (!cpu_isar_feature(aa64_st, cpu)) {
                        goto fail;
                    }
                    startlevel = 3;
                    break;
                }
            }
            break;
        case 11: /* 16KB */
            switch (sl0) {
            case 2:
                if (arm_pamax(cpu) < 42) {
                    goto fail;
                }
                break;
            case 3:
                if (!ds) {
                    goto fail;
                }
                break;
            }
            startlevel = 3 - sl0;
            break;
        case 13: /* 64KB */
            switch (sl0) {
            case 2:
                if (arm_pamax(cpu) < 44) {
                    goto fail;
                }
                break;
            case 3:
                goto fail;
            }
            startlevel = 3 - sl0;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /*
         * Things are simpler for AArch32 EL2, with only 4k pages.
         * There is no separate S2InvalidSL function, but AArch32.S2Walk
         * begins with walkparms.sl0 in {'1x'}.
         */
        assert(stride == 9);
        if (sl0 >= 2) {
            goto fail;
        }
        startlevel = 2 - sl0;
    }

    /* AArch{64,32}.S2InconsistentSL are functionally equivalent. */
    levels = 3 - startlevel;
    granulebits = stride + 3;

    s1_min_iasize = levels * stride + granulebits + 1;
    s1_max_iasize = s1_min_iasize + (stride - 1) + 4;

    if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) {
        return startlevel;
    }

 fail:
    return INT_MIN;
}
/*
 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr,
 * attrs, prot and page_size may not be filled in, and the populated fsr
 * value provides information on why the translation aborted, in the format
 * of a long-format DFSR/IFSR fault register, with the following caveat:
 * the WnR bit is never set (the caller must do this).
 *
 * @ptw: Current and next stage parameters for the walk.
 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @s1_is_el0: if @ptw->in_mmu_idx is ARMMMUIdx_Stage2
 *             (so this is a stage 2 page table walk),
 *             must be true if this is stage 2 of a stage 1+2
 *             walk for an EL0 access. If @mmu_idx is anything else,
 *             @s1_is_el0 is ignored.
 * @result: set on translation success,
 * @fi: set to fault info if the translation fails
 */
static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
                               uint64_t address,
                               MMUAccessType access_type, bool s1_is_el0,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    bool is_secure = ptw->in_secure;
    int32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint64_t attrs;
    int32_t stride;
    int addrsize, inputsize, outputsize;
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    uint64_t descriptor, new_descriptor;
    bool nstable;

    /* TODO: This code does not support shareability levels. */
    if (aarch64) {
        int ps;

        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;

        /*
         * If TxSZ is programmed to a value larger than the maximum,
         * or smaller than the effective minimum, it is IMPLEMENTATION
         * DEFINED whether we behave as if the field were programmed
         * within bounds, or if a level 0 Translation fault is generated.
         *
         * With FEAT_LVA, fault on less than minimum becomes required,
         * so our choice is to always raise the fault.
         */
        if (param.tsz_oob) {
            goto do_translation_fault;
        }

        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;

        /*
         * Bound PS by PARANGE to find the effective output address size.
         * ID_AA64MMFR0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
        ps = MIN(ps, param.ps);
        assert(ps < ARRAY_SIZE(pamax_map));
        outputsize = pamax_map[ps];

        /*
         * With LPA2, the effective output address (OA) size is at most 48 bits
         * unless TCR.DS == 1
         */
        if (!param.ds && param.gran != Gran64K) {
            outputsize = MIN(outputsize, 48);
        }
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
        inputsize = addrsize - param.tsz;
        outputsize = 40;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select) {
            /* The gap between the two regions is a Translation fault */
            goto do_translation_fault;
        }
    }
    stride = arm_granule_bits(param.gran) - 3;

    /*
     * Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /*
     * Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /*
         * Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_translation_fault;
    }

    if (!regime_is_stage2(mmu_idx)) {
        /*
         * The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
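        /*
         * Worked example of the formula above: a 48-bit input address with
         * a 4KB granule (stride == 9) gives level = 4 - 44 / 9 = 0, i.e. a
         * four-level walk; a 39-bit input gives level = 4 - 35 / 9 = 1.
         */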
    } else {
        int startlevel = check_s2_mmu_setup(cpu, aarch64, tcr, param.ds,
                                            inputsize, stride);
        if (startlevel == INT_MIN) {
            goto do_translation_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
    indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);

    /*
     * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
     *
     * Otherwise, if the base address is out of range, raise AddressSizeFault.
     * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
     * but we've just cleared the bits above 47, so simplify the test.
     */
    if (outputsize > 48) {
        descaddr |= extract64(ttbr, 2, 4) << 48;
    } else if (descaddr >> outputsize) {
        fi->type = ARMFault_AddressSize;
        goto do_fault;
    }

    /*
     * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
     * and also to mask out CnP (bit 0) which could validly be non-zero.
     */
    descaddr &= ~indexmask;

    /*
     * For AArch32, the address field in the descriptor goes up to bit 39
     * for both v7 and v8. However, for v8 the SBZ bits [47:40] must be 0
     * or an AddressSize fault is raised. So for v8 we extract those SBZ
     * bits as part of the address, which will be checked via outputsize.
     * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
     * the highest bits of a 52-bit output are placed elsewhere.
     */
    if (param.ds) {
        descaddrmask = MAKE_64BIT_MASK(0, 50);
    } else if (arm_feature(env, ARM_FEATURE_V8)) {
        descaddrmask = MAKE_64BIT_MASK(0, 48);
    } else {
        descaddrmask = MAKE_64BIT_MASK(0, 40);
    }
    descaddrmask &= ~indexmask_grainsize;

    /*
     * Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = is_secure ? 0 : (1 << 4);

 next_level:
    descaddr |= (address >> (stride * (4 - level))) & indexmask;
    nstable = extract32(tableattrs, 4, 1);
    if (nstable) {
        /*
         * Stage2_S -> Stage2 or Phys_S -> Phys_NS
         * Assert that the non-secure idx are even, and relative order.
         */
        QEMU_BUILD_BUG_ON((ARMMMUIdx_Phys_NS & 1) != 0);
        QEMU_BUILD_BUG_ON((ARMMMUIdx_Stage2 & 1) != 0);
        QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_NS + 1 != ARMMMUIdx_Phys_S);
        QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2 + 1 != ARMMMUIdx_Stage2_S);
        ptw->in_ptw_idx &= ~1;
        ptw->in_secure = false;
    }
    if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
        goto do_fault;
    }
    descriptor = arm_ldq_ptw(env, ptw, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    new_descriptor = descriptor;
 restart_atomic_update:
    if (!(descriptor & 1) || (!(descriptor & 2) && (level == 3))) {
        /* Invalid, or the Reserved level 3 encoding */
        goto do_translation_fault;
    }

    descaddr = descriptor & descaddrmask;

    /*
     * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
     * of descriptor. For FEAT_LPA2 and effective DS, bits [51:50] of
     * descaddr are in [9:8]. Otherwise, if descaddr is out of range,
     * raise AddressSizeFault.
     */
    if (outputsize > 48) {
        if (param.ds) {
            descaddr |= extract64(descriptor, 8, 2) << 50;
        } else {
            descaddr |= extract64(descriptor, 12, 4) << 48;
        }
    } else if (descaddr >> outputsize) {
        fi->type = ARMFault_AddressSize;
        goto do_fault;
    }

    if ((descriptor & 2) && (level < 3)) {
        /*
         * Table entry. The top five bits are attributes which may
         * propagate down through lower levels of the table (and
         * which are all arranged so that 0 means "no effect", so
         * we can gather them up by ORing in the bits at each level).
         */
        tableattrs |= extract64(descriptor, 59, 5);
        level++;
        indexmask = indexmask_grainsize;
        goto next_level;
    }

    /*
     * Block entry at level 1 or 2, or page entry at level 3.
     * These are basically the same thing, although the number
     * of bits we pull in from the vaddr varies. Note that although
     * descaddrmask masks enough of the low bits of the descriptor
     * to give a correct page or table address, the address field
     * in a block descriptor is smaller; so we need to explicitly
     * clear the lower bits here before ORing in the low vaddr bits.
     *
     * Afterward, descaddr is the final physical address.
     */
    page_size = (1ULL << ((stride * (4 - level)) + 3));
    descaddr &= ~(hwaddr)(page_size - 1);
    descaddr |= (address & (page_size - 1));
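    /*
     * Worked example of the block/page size above: with a 4KB granule
     * (stride == 9), a level 3 entry gives page_size = 1 << 12 (4KB),
     * a level 2 block gives 1 << 21 (2MB) and a level 1 block 1 << 30 (1GB).
     */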
    if (likely(!ptw->in_debug)) {
        /*
         * If HA is enabled, prepare to update the descriptor below.
         * Otherwise, pass the access fault on to software.
         */
        if (!(descriptor & (1 << 10))) {
            if (param.ha) {
                new_descriptor |= 1 << 10; /* AF */
            } else {
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
        }

        /*
         * If HD is enabled, pre-emptively set/clear the appropriate AP/S2AP
         * bit for writeback. The actual write protection test may still be
         * overridden by tableattrs, to be merged below.
         */
        if (param.hd
            && extract64(descriptor, 51, 1)  /* DBM */
            && access_type == MMU_DATA_STORE) {
            if (regime_is_stage2(mmu_idx)) {
                new_descriptor |= 1ull << 7;    /* set S2AP[1] */
            } else {
                new_descriptor &= ~(1ull << 7); /* clear AP[2] */
            }
        }
    }

    /*
     * Extract attributes from the (modified) descriptor, and apply
     * table descriptors. Stage 2 table descriptors do not include
     * any attribute fields. HPD disables all the table attributes
     * except NSTable.
     */
    attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
    if (!regime_is_stage2(mmu_idx)) {
        attrs |= nstable << 5; /* NS */
        if (!param.hpd) {
            attrs |= extract64(tableattrs, 0, 2) << 53;     /* XN, PXN */
            /*
             * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
             * means "force PL1 access only", which means forcing AP[1] to 0.
             */
            attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */
            attrs |= extract32(tableattrs, 3, 1) << 7;    /* APT[1] => AP[2] */
        }
    }

    ap = extract32(attrs, 6, 2);
    if (regime_is_stage2(mmu_idx)) {
        ns = mmu_idx == ARMMMUIdx_Stage2;
        xn = extract64(attrs, 53, 2);
        result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
    } else {
        ns = extract32(attrs, 5, 1);
        xn = extract64(attrs, 54, 1);
        pxn = extract64(attrs, 53, 1);
        result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    if (!(result->f.prot & (1 << access_type))) {
        fi->type = ARMFault_Permission;
        goto do_fault;
    }

    /* If FEAT_HAFDBS has made changes, update the PTE. */
    if (new_descriptor != descriptor) {
        new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        /*
         * I_YZSVV says that if the in-memory descriptor has changed,
         * then we must use the information in that new value
         * (which might include a different output address, different
         * attributes, or generate a fault).
         * Restart the handling of the descriptor value from scratch.
         */
        if (new_descriptor != descriptor) {
            descriptor = new_descriptor;
            goto restart_atomic_update;
        }
    }

    if (ns) {
        /*
         * The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        result->f.attrs.secure = false;
    }

    if (regime_is_stage2(mmu_idx)) {
        result->cacheattrs.is_s2_format = true;
        result->cacheattrs.attrs = extract32(attrs, 2, 4);
    } else {
        /* Index into MAIR registers for cache attributes */
        uint8_t attrindx = extract32(attrs, 2, 3);
        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
        assert(attrindx <= 7);
        result->cacheattrs.is_s2_format = false;
        result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
    }

    /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
    if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
        result->f.guarded = extract64(attrs, 50, 1); /* GP */
    }

    /*
     * For FEAT_LPA2 and effective DS, the SH field in the attributes
     * was re-purposed for output address bits. The SH attribute in
     * that case comes from TCR_ELx, which we extracted earlier.
     */
    if (param.ds) {
        result->cacheattrs.shareability = param.sh;
    } else {
        result->cacheattrs.shareability = extract32(attrs, 8, 2);
    }

    result->f.phys_addr = descaddr;
    result->f.lg_page_size = ctz64(page_size);
    return false;

 do_translation_fault:
    fi->type = ARMFault_Translation;
 do_fault:
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
    fi->stage2 = fi->s1ptw || regime_is_stage2(mmu_idx);
    fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
    return true;
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 bool is_secure, GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx, is_secure)) {
        /* MPU disabled. */
        result->f.phys_addr = address;
        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    result->f.phys_addr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            return true;
        }
        result->f.prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        result->f.prot = PAGE_READ;
        if (!is_user) {
            result->f.prot |= PAGE_WRITE;
        }
        break;
    case 3:
        result->f.prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            return true;
        }
        result->f.prot = PAGE_READ;
        break;
    case 6:
        result->f.prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        fi->type = ARMFault_Permission;
        return true;
    }
    result->f.prot |= PAGE_EXEC;
    return false;
}
static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
                                         int32_t address, uint8_t *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /*
     * True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                                         bool is_secure, bool is_user)
{
    /*
     * Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    }

    if (mmu_idx == ARMMMUIdx_Stage2) {
        return false;
    }

    return regime_sctlr(env, mmu_idx) & SCTLR_BR;
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 bool secure, GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    result->f.phys_addr = address;
    result->f.lg_page_size = TARGET_PAGE_BITS;
    result->f.prot = 0;

    if (regime_translation_disabled(env, mmu_idx, secure) ||
        m_is_ppb_region(env, address)) {
        /*
         * MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    result->f.lg_page_size = 0;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /*
                     * This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                result->f.lg_page_size = rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address,
                                         &result->f.prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    result->f.prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    result->f.prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        result->f.prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    result->f.prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    result->f.prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        result->f.prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                result->f.prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    return !(result->f.prot & (1 << access_type));
}
static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx,
                             uint32_t secure)
{
    if (regime_el(env, mmu_idx) == 2) {
        return env->pmsav8.hprbar;
    } else {
        return env->pmsav8.rbar[secure];
    }
}

static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx,
                             uint32_t secure)
{
    if (regime_el(env, mmu_idx) == 2) {
        return env->pmsav8.hprlar;
    } else {
        return env->pmsav8.rlar[secure];
    }
}
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /*
     * Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * If the region hit doesn't cover the entire TARGET_PAGE the address
     * is within, then we set the result page_size to 1 to force the
     * memory system to use a subpage.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
    int region_counter;

    if (regime_el(env, mmu_idx) == 2) {
        region_counter = cpu->pmsav8r_hdregion;
    } else {
        region_counter = cpu->pmsav7_dregion;
    }

    result->f.lg_page_size = TARGET_PAGE_BITS;
    result->f.phys_addr = address;
    result->f.prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    if (mmu_idx == ARMMMUIdx_Stage2) {
        fi->stage2 = true;
    }

    /*
     * Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx, secure)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        uint32_t bitmask;

        if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
            hit = true;
        }

        if (arm_feature(env, ARM_FEATURE_M)) {
            bitmask = 0x1f;
        } else {
            bitmask = 0x3f;
            fi->level = 0;
        }

        for (n = region_counter - 1; n >= 0; n--) {
            /*
             * Note that the base address is bits [31:x] from the register
             * with bits [x-1:0] all zeroes, but the limit address is bits
             * [31:x] from the register with bits [x:0] all ones. Where x is
             * 5 for Cortex-M and 6 for Cortex-R
             */
            uint32_t base = regime_rbar(env, mmu_idx, secure)[n] & ~bitmask;
            uint32_t limit = regime_rlar(env, mmu_idx, secure)[n] | bitmask;

            if (!(regime_rlar(env, mmu_idx, secure)[n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    result->f.lg_page_size = 0;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                result->f.lg_page_size = 0;
            }

            if (matchregion != -1) {
                /*
                 * Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                if (arm_feature(env, ARM_FEATURE_M)) {
                    fi->level = 1;
                }
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            fi->type = ARMFault_Background;
        } else {
            fi->type = ARMFault_Permission;
        }
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
    } else {
        uint32_t matched_rbar = regime_rbar(env, mmu_idx, secure)[matchregion];
        uint32_t matched_rlar = regime_rlar(env, mmu_idx, secure)[matchregion];
        uint32_t ap = extract32(matched_rbar, 1, 2);
        uint32_t xn = extract32(matched_rbar, 0, 1);
        bool pxn = false;

        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
            pxn = extract32(matched_rlar, 4, 1);
        }

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        if (regime_el(env, mmu_idx) == 2) {
            result->f.prot = simple_ap_to_rw_prot_is_user(ap,
                                            mmu_idx != ARMMMUIdx_E2);
        } else {
            result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        }

        if (!arm_feature(env, ARM_FEATURE_M)) {
            uint8_t attrindx = extract32(matched_rlar, 1, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            uint8_t sh = extract32(matched_rlar, 3, 2);

            if (regime_sctlr(env, mmu_idx) & SCTLR_WXN &&
                result->f.prot & PAGE_WRITE && mmu_idx != ARMMMUIdx_Stage2) {
                xn = 0x1;
            }

            if ((regime_el(env, mmu_idx) == 1) &&
                regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) {
                pxn = 0x1;
            }

            result->cacheattrs.is_s2_format = false;
            result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
            result->cacheattrs.shareability = sh;
        }

        if (result->f.prot && !xn && !(pxn && !is_user)) {
            result->f.prot |= PAGE_EXEC;
        }

        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    if (arm_feature(env, ARM_FEATURE_M)) {
        fi->level = 1;
    }
    return !(result->f.prot & (1 << access_type));
}
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /*
     * The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool is_secure, V8M_SAttributes *sattrs)
{
    /*
     * Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !is_secure;
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /*
                         * If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 bool secure, GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    V8M_SAttributes sattrs = {};
    bool ret;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx,
                            secure, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /*
             * Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
                result->f.phys_addr = address;
                result->f.prot = 0;
                return true;
            }
        } else {
            /*
             * For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                result->f.attrs.secure = false;
            } else if (!secure) {
                /*
                 * NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
                result->f.phys_addr = address;
                result->f.prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
                            result, fi, NULL);
    if (sattrs.subpage) {
        result->f.lg_page_size = 0;
    }
    return ret;
}

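/*
 * Editor's note (illustrative example, not part of the original source):
 * for the "sattrs.ns != !secure" check above, consider a CPU in the
 * Non-secure state (secure == false) fetching from an address that the
 * SAU/IDAU marks Secure (sattrs.ns == false).  Then
 * sattrs.ns != !secure evaluates as false != true, so the fetch fails
 * with ARMFault_QEMU_NSCExec if the region is NS-Callable (the SG entry
 * case) or ARMFault_QEMU_SFault otherwise, and the exception path sorts
 * out the rest.
 */
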
/*
 * Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if (hcr & HCR_CD) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}

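/*
 * Editor's note (worked example, not part of the original source):
 * s2attrs == 0xf (stage 2 "Normal, Outer WB, Inner WB") with HCR_EL2.CD
 * clear gives hiattr = loattr = 3 and hihint = lohint = 3 (RW allocate),
 * i.e. (3 << 6) | (3 << 4) | (3 << 2) | 3 == 0xff, the MAIR encoding of
 * Normal Write-Back, RW-allocate.  With HCR_EL2.CD set the same input
 * collapses to (1 << 6) | (1 << 2) == 0x44, Normal Non-cacheable.
 */
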
/*
 * Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry with respect to the pseudocode.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /*
         * stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}

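/*
 * Editor's note (worked example, not part of the original source):
 * s1 == 0xf (Write-Back, RW-allocate) combined with s2 == 0xa
 * (Write-Through) takes the stage 2 Write-Through type but keeps the
 * stage 1 allocation hints: (2 << 2) | extract32(0xf, 0, 2) == 0xb.
 */
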
/*
 * Combine the memory type and cacheability attributes of
 * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
 * combined attributes in MAIR_EL1 format.
 */
static uint8_t combined_attrs_nofwb(uint64_t hcr,
                                    ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;

    if (s2.is_s2_format) {
        s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
    } else {
        s2_mair_attrs = s2.attrs;
    }

    s1lo = extract32(s1.attrs, 0, 4);
    s2lo = extract32(s2_mair_attrs, 0, 4);
    s1hi = extract32(s1.attrs, 4, 4);
    s2hi = extract32(s2_mair_attrs, 4, 4);

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret_attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret_attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret_attrs = 8;  /* nGRE */
        } else {
            ret_attrs = 0xc; /* GRE */
        }
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);
    }
    return ret_attrs;
}

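/*
 * Editor's note (illustrative example, not part of the original source):
 * if stage 1 maps a page as Device-nGnRE (0x04) and stage 2 maps it as
 * Normal Write-Back, then s1hi == 0 selects the Device branch above and
 * s1lo == 4 yields 0x04 again: Device always wins over Normal, and the
 * strictest Device subtype wins within Device.
 */
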
static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
{
    /*
     * Given the 4 bits specifying the outer or inner cacheability
     * in MAIR format, return a value specifying Normal Write-Back,
     * with the allocation and transient hints taken from the input
     * if the input specified some kind of cacheable attribute.
     */
    if (attr == 0 || attr == 4) {
        /*
         * 0 == an UNPREDICTABLE encoding
         * 4 == Non-cacheable
         * Either way, force Write-Back RW allocate non-transient
         */
        return 0xf;
    }
    /* Change WriteThrough to WriteBack, keep allocation and transient hints */
    return attr | 4;
}

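/*
 * Editor's note (worked example, not part of the original source):
 * attr == 0xa (Write-Through, Read-allocate) is neither 0 nor 4, so the
 * result is 0xa | 4 == 0xe, Normal Write-Back with the same hints;
 * attr == 4 (Non-cacheable) has no hints to preserve and becomes 0xf.
 */
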
/*
 * Combine the memory type and cacheability attributes of
 * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
 * combined attributes in MAIR_EL1 format.
 */
static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    assert(s2.is_s2_format && !s1.is_s2_format);

    switch (s2.attrs) {
    case 7:
        /* Use stage 1 attributes */
        return s1.attrs;
    case 6:
        /*
         * Force Normal Write-Back. Note that if S1 is Normal cacheable
         * then we take the allocation hints from it; otherwise it is
         * RW allocate, non-transient.
         */
        if ((s1.attrs & 0xf0) == 0) {
            /* S1 is Device */
            return 0xff;
        }
        /* Need to check the Inner and Outer nibbles separately */
        return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
            force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
    case 5:
        /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
        if ((s1.attrs & 0xf0) == 0) {
            return s1.attrs;
        }
        return 0x44;
    case 0 ... 3: /* Device */
        /* Force Device, of subtype specified by S2 */
        return s2.attrs << 2;
    default:
        /*
         * RESERVED values (including RES0 descriptor bit [5] being nonzero);
         * arbitrarily force Device.
         */
        return 0;
    }
}

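/*
 * Editor's note (worked example, not part of the original source):
 * with FWB, a stage 2 attribute of 6 forces Normal Write-Back.  If stage 1
 * is Normal Write-Through Read-allocate (0xaa), each nibble is rewritten
 * by force_cacheattr_nibble_wb() to 0xe, giving 0xee; if stage 1 is
 * Device, the result is plain 0xff (Write-Back, RW-allocate).
 */
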
/*
 * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
                                        ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    ARMCacheAttrs ret;
    bool tagged = false;

    assert(!s1.is_s2_format);
    ret.is_s2_format = false;
    ret.guarded = s1.guarded;

    if (s1.attrs == 0xf0) {
        tagged = true;
        s1.attrs = 0xff;
    }

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (hcr & HCR_FWB) {
        ret.attrs = combined_attrs_fwb(s1, s2);
    } else {
        ret.attrs = combined_attrs_nofwb(hcr, s1, s2);
    }

    /*
     * Any location for which the resultant memory type is any
     * type of Device memory is always treated as Outer Shareable.
     * Any location for which the resultant memory type is Normal
     * Inner Non-cacheable, Outer Non-cacheable is always treated
     * as Outer Shareable.
     * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
     */
    if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
        ret.shareability = 2;
    }

    /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
    if (tagged && ret.attrs == 0xff) {
        ret.attrs = 0xf0;
    }

    return ret;
}

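/*
 * Editor's note (illustrative, not part of the original source): the
 * shareability merge above picks the more constrained of the two stages
 * (Outer Shareable beats Inner Shareable beats Non-shareable), and the
 * final fix-up forces Outer Shareable for any Device result or for the
 * Normal iNC-oNC encoding 0x44, matching the architected rule that such
 * locations are always treated as Outer Shareable.
 */
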
/*
 * MMU disabled.  S1 addresses within aa64 translation regimes are
 * still checked for bounds -- see AArch64.S1DisabledOutput().
 */
static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
                                   MMUAccessType access_type,
                                   ARMMMUIdx mmu_idx, bool is_secure,
                                   GetPhysAddrResult *result,
                                   ARMMMUFaultInfo *fi)
{
    uint8_t memattr = 0x00;    /* Device nGnRnE */
    uint8_t shareability = 0;  /* non-sharable */
    int r_el;

    switch (mmu_idx) {
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_Phys_S:
        break;

    default:
        r_el = regime_el(env, mmu_idx);
        if (arm_el_is_aa64(env, r_el)) {
            int pamax = arm_pamax(env_archcpu(env));
            uint64_t tcr = env->cp15.tcr_el[r_el];
            int addrtop, tbi;

            tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
            if (access_type == MMU_INST_FETCH) {
                tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
            }
            tbi = (tbi >> extract64(address, 55, 1)) & 1;
            addrtop = (tbi ? 55 : 63);

            if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
                fi->type = ARMFault_AddressSize;
                fi->level = 0;
                fi->stage2 = false;
                return true;
            }

            /*
             * When TBI is disabled, we've just validated that all of the
             * bits above PAMax are zero, so logically we only need to
             * clear the top byte for TBI.  But it's clearer to follow
             * the pseudocode set of addrdesc.paddress.
             */
            address = extract64(address, 0, 52);
        }

        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
        if (r_el == 1) {
            uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
            if (hcr & HCR_DC) {
                if (hcr & HCR_DCT) {
                    memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
                } else {
                    memattr = 0xff;  /* Normal, WB, RWA */
                }
            }
        }
        if (memattr == 0 && access_type == MMU_INST_FETCH) {
            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
                memattr = 0xee;  /* Normal, WT, RA, NT */
            } else {
                memattr = 0x44;  /* Normal, NC, No */
            }
            shareability = 2; /* outer sharable */
        }
        result->cacheattrs.is_s2_format = false;
        break;
    }

    result->f.phys_addr = address;
    result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    result->f.lg_page_size = TARGET_PAGE_BITS;
    result->cacheattrs.shareability = shareability;
    result->cacheattrs.attrs = memattr;
    return false;
}

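/*
 * Editor's note (worked example, not part of the original source): with a
 * 40-bit PAMax, an MMU-off access to an address with bit 60 set (part of
 * the top byte) fails the extract64(address, pamax, addrtop - pamax + 1)
 * check when TBI is disabled (addrtop == 63) and reports
 * ARMFault_AddressSize, but passes when TBI is enabled (addrtop == 55),
 * since the top byte is then ignored as a tag.
 */
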
static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
                                   target_ulong address,
                                   MMUAccessType access_type,
                                   GetPhysAddrResult *result,
                                   ARMMMUFaultInfo *fi)
{
    hwaddr ipa;
    int s1_prot, s1_lgpgsz;
    bool is_secure = ptw->in_secure;
    bool ret, ipa_secure, s2walk_secure;
    ARMCacheAttrs cacheattrs1;
    bool is_el0;
    uint64_t hcr;

    ret = get_phys_addr_with_struct(env, ptw, address, access_type, result, fi);

    /* If S1 fails, return early. */
    if (ret) {
        return ret;
    }

    ipa = result->f.phys_addr;
    ipa_secure = result->f.attrs.secure;

    if (is_secure) {
        /* Select TCR based on the NS bit from the S1 walk. */
        s2walk_secure = !(ipa_secure
                          ? env->cp15.vstcr_el2 & VSTCR_SW
                          : env->cp15.vtcr_el2 & VTCR_NSW);
    } else {
        assert(!ipa_secure);
        s2walk_secure = false;
    }

    is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
    ptw->in_mmu_idx = s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
    ptw->in_ptw_idx = s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
    ptw->in_secure = s2walk_secure;

    /*
     * S1 is done, now do S2 translation.
     * Save the stage1 results so that we may merge prot and cacheattrs later.
     */
    s1_prot = result->f.prot;
    s1_lgpgsz = result->f.lg_page_size;
    cacheattrs1 = result->cacheattrs;
    memset(result, 0, sizeof(*result));

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        ret = get_phys_addr_pmsav8(env, ipa, access_type,
                                   ptw->in_mmu_idx, is_secure, result, fi);
    } else {
        ret = get_phys_addr_lpae(env, ptw, ipa, access_type,
                                 is_el0, result, fi);
    }
    fi->s2addr = ipa;

    /* Combine the S1 and S2 perms. */
    result->f.prot &= s1_prot;

    /* If S2 fails, return early. */
    if (ret) {
        return ret;
    }

    /*
     * If either S1 or S2 returned a result smaller than TARGET_PAGE_SIZE,
     * this means "don't put this in the TLB"; in this case, return a
     * result with lg_page_size == 0 to achieve that. Otherwise,
     * use the maximum of the S1 & S2 page size, so that invalidation
     * of pages > TARGET_PAGE_SIZE works correctly. (This works even though
     * we know the combined result permissions etc only cover the minimum
     * of the S1 and S2 page size, because we know that the common TLB code
     * never actually creates TLB entries bigger than TARGET_PAGE_SIZE,
     * and passing a larger page size value only affects invalidations.)
     */
    if (result->f.lg_page_size < TARGET_PAGE_BITS ||
        s1_lgpgsz < TARGET_PAGE_BITS) {
        result->f.lg_page_size = 0;
    } else if (result->f.lg_page_size < s1_lgpgsz) {
        result->f.lg_page_size = s1_lgpgsz;
    }

    /* Combine the S1 and S2 cache attributes. */
    hcr = arm_hcr_el2_eff_secstate(env, is_secure);
    if (hcr & HCR_DC) {
        /*
         * HCR.DC forces the first stage attributes to
         *  Normal Non-Shareable,
         *  Inner Write-Back Read-Allocate Write-Allocate,
         *  Outer Write-Back Read-Allocate Write-Allocate.
         * Do not overwrite Tagged within attrs.
         */
        if (cacheattrs1.attrs != 0xf0) {
            cacheattrs1.attrs = 0xff;
        }
        cacheattrs1.shareability = 0;
    }
    result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
                                            result->cacheattrs);

    /*
     * Check if IPA translates to secure or non-secure PA space.
     * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
     */
    result->f.attrs.secure =
        (is_secure
         && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
         && (ipa_secure
             || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));

    return false;
}

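/*
 * Editor's note (illustrative, not part of the original source): for the
 * page-size merge above, a 2MiB stage 1 block (lg_page_size == 21) mapped
 * through 4KiB stage 2 pages (lg_page_size == 12) yields a combined
 * lg_page_size of 21: since the TLB never caches entries larger than
 * TARGET_PAGE_SIZE, reporting the larger size only widens invalidations
 * and is therefore safe.
 */
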
static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
                                      target_ulong address,
                                      MMUAccessType access_type,
                                      GetPhysAddrResult *result,
                                      ARMMMUFaultInfo *fi)
{
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    bool is_secure = ptw->in_secure;
    ARMMMUIdx s1_mmu_idx;

    /*
     * The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    result->f.attrs.secure = is_secure;

    switch (mmu_idx) {
    case ARMMMUIdx_Phys_S:
    case ARMMMUIdx_Phys_NS:
        /* Checking Phys early avoids special casing later vs regime_el. */
        return get_phys_addr_disabled(env, address, access_type, mmu_idx,
                                      is_secure, result, fi);

    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        /* First stage lookup uses second stage for ptw. */
        ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
        break;

    case ARMMMUIdx_E10_0:
        s1_mmu_idx = ARMMMUIdx_Stage1_E0;
        goto do_twostage;
    case ARMMMUIdx_E10_1:
        s1_mmu_idx = ARMMMUIdx_Stage1_E1;
        goto do_twostage;
    case ARMMMUIdx_E10_1_PAN:
        s1_mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
    do_twostage:
        /*
         * Call ourselves recursively to do the stage 1 and then stage 2
         * translations if mmu_idx is a two-stage regime, and EL2 present.
         * Otherwise, a stage1+stage2 translation is just stage 1.
         */
        ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
        if (arm_feature(env, ARM_FEATURE_EL2) &&
            !regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) {
            return get_phys_addr_twostage(env, ptw, address, access_type,
                                          result, fi);
        }
        /* fall through */

    default:
        /* Single stage and second stage use physical for ptw. */
        ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
        break;
    }

    result->f.attrs.user = regime_is_user(env, mmu_idx);

    /*
     * Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        result->f.lg_page_size = TARGET_PAGE_BITS;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       is_secure, result, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       is_secure, result, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       is_secure, result, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      result->f.prot & PAGE_READ ? 'r' : '-',
                      result->f.prot & PAGE_WRITE ? 'w' : '-',
                      result->f.prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx, is_secure)) {
        return get_phys_addr_disabled(env, address, access_type, mmu_idx,
                                      is_secure, result, fi);
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, ptw, address, access_type, false,
                                  result, fi);
    } else if (arm_feature(env, ARM_FEATURE_V7) ||
               regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
    } else {
        return get_phys_addr_v5(env, ptw, address, access_type, result, fi);
    }
}

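/*
 * Editor's note (worked example, not part of the original source): the FCSE
 * adjustment above only applies to pre-v8 stage 1 lookups of virtual
 * addresses below 0x02000000.  With FCSEIDR == 0x7f000000, a lookup of
 * VA 0x00001000 proceeds with modified VA 0x7f001000, while a lookup of
 * VA 0x02001000 is left unchanged.
 */
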
bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               bool is_secure, GetPhysAddrResult *result,
                               ARMMMUFaultInfo *fi)
{
    S1Translate ptw = {
        .in_mmu_idx = mmu_idx,
        .in_secure = is_secure,
    };
    return get_phys_addr_with_struct(env, &ptw, address, access_type,
                                     result, fi);
}

bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    bool is_secure;

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
        is_secure = arm_is_secure_below_el3(env);
        break;
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        is_secure = false;
        break;
    case ARMMMUIdx_E3:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_Phys_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        is_secure = true;
        break;
    default:
        g_assert_not_reached();
    }
    return get_phys_addr_with_secure(env, address, access_type, mmu_idx,
                                     is_secure, result, fi);
}

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    S1Translate ptw = {
        .in_mmu_idx = arm_mmu_idx(env),
        .in_secure = arm_is_secure(env),
        .in_debug = true,
    };
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    bool ret;

    ret = get_phys_addr_with_struct(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
    *attrs = res.f.attrs;

    if (ret) {
        return -1;
    }
    return res.f.phys_addr;
}
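
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * a caller typically drives these entry points roughly as below.  The
 * helper name and the error handling shown are hypothetical; only
 * get_phys_addr(), GetPhysAddrResult and ARMMMUFaultInfo come from this
 * file.
 *
 *     static hwaddr example_translate(CPUARMState *env, target_ulong va,
 *                                     ARMMMUIdx mmu_idx, bool *ok)
 *     {
 *         GetPhysAddrResult res = {};
 *         ARMMMUFaultInfo fi = {};
 *
 *         // get_phys_addr() returns true on a translation fault
 *         if (get_phys_addr(env, va, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
 *             *ok = false;
 *             return 0;
 *         }
 *         *ok = true;
 *         return res.f.phys_addr;
 *     }
 */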