/*
 * ARM page table walking.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/range.h"
#include "cpu.h"
#include "internals.h"

static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
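
    /*
     * ARMv5 uses the short-descriptor format: a two-level walk where the
     * level 1 descriptor is either a 1MB section or a pointer to a coarse
     * or fine level 2 table of 64K/4K/1K pages.
     */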
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
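
    /*
     * Each of the 16 domains owns two bits in the DACR: 0 and 2 deny all
     * accesses (domain fault), 1 is "client" (permission bits are checked)
     * and 3 is "manager" (permission bits are ignored).
     */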
    if (type == 0) {
        /* Section translation fault. */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1MB section. */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry. */
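        /*
         * Bits [1:0] of the l1 descriptor select the table type: a
         * coarse table (type 1) has 256 entries mapping 4K each, while
         * a fine table (type 3) has 1024 entries mapping 1K each.
         */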
        if (type == 1) {
            /* Coarse pagetable. */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable. */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /*
                     * UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            g_assert_not_reached();
        }
    }
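    /*
     * ap is the 2-bit AP field for the (sub)page that was hit;
     * ap_to_rw_prot() combines it with domain_prot and the legacy
     * SCTLR.S/R bits to produce the QEMU page protection flags.
     */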
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault. */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    ARMCPU *cpu = env_archcpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;
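
    /*
     * ARMv6 extends the short-descriptor format with AP[2], XN and (with
     * TrustZone) NS bits, 16MB supersections, and on later cores a PXN
     * bit; level 2 is always a coarse table.
     */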

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section. */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
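
    /*
     * For level 1 descriptors, bit 18 distinguishes a 16MB supersection
     * (whose extra PA bits [39:32] live in desc bits [23:20] and [8:5])
     * from an ordinary 1MB section.
     */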
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection. */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section. */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry. */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            g_assert_not_reached();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit. */
            if ((ap & 1) == 0) {
                /* Access flag fault. */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault. */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);
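
    /*
     * PMSAv5 provides eight regions; each c6_region register packs the
     * region's base address, a 5-bit size field and an enable bit.
     */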

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
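    /*
     * Each region owns a 4-bit AP field in the instruction or data
     * access-permission register; decode it into page protection bits.
     */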
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}

static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
                                         int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
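
/*
 * Whether the "background" default map applies when no MPU region hits
 * is controlled by MPU_CTRL.PRIVDEFENA on M profile and SCTLR.BR on R
 * profile, and only ever for privileged accesses.
 */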
static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                                         bool is_user)
{
    /*
     * Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}

static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;
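
    /*
     * Each PMSAv7 region is described by three registers: DRBAR (base
     * address), DRSR (enable, power-of-two size, subregion disables)
     * and DRACR (access permissions and XN). Higher-numbered regions
     * take priority when several match.
     */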
    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /*
         * MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;
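
            /*
             * DRSR.Rsize encodes a region of 2^(Rsize + 1) bytes, hence
             * the increment above before forming the address mask.
             */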

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /*
                     * This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* an MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /*
     * Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
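
    /*
     * Unlike PMSAv7's power-of-two sizes, v8M regions are arbitrary
     * base/limit pairs at 32-byte granularity, and an address hitting
     * more than one region is a fault rather than a priority scheme.
     */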

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /*
     * Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /*
             * Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /*
                 * Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
        bool pxn = false;

        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
            pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
        }

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn && !(pxn && !is_user)) {
            *prot |= PAGE_EXEC;
        }
        /*
         * We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}

static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /*
             * Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /*
             * For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /*
                 * NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
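    /*
     * If either the SAU region or the MPU region is smaller than the
     * page, report a 1-byte page size so QEMU inserts a non-cached
     * TLB entry and re-walks on every access.
     */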
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}

/**
 * get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 *               (MMU_DATA_LOAD, MMU_DATA_STORE, MMU_INST_FETCH)
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);

    if (mmu_idx != s1_mmu_idx) {
        /*
         * Call ourselves recursively to do the stage 1 and then stage 2
         * translations if mmu_idx is a two-stage regime.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            bool ipa_secure;
            ARMCacheAttrs cacheattrs2 = {};
            ARMMMUIdx s2_mmu_idx;
            bool is_el0;

            ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
                                attrs, prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early. */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
                *phys_ptr = ipa;
                return ret;
            }

            ipa_secure = attrs->secure;
            if (arm_is_secure_below_el3(env)) {
                if (ipa_secure) {
                    attrs->secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
                } else {
                    attrs->secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
                }
            } else {
                assert(!ipa_secure);
            }

            s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
            is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
919 ret
= get_phys_addr_lpae(env
, ipa
, access_type
, s2_mmu_idx
, is_el0
,
920 phys_ptr
, attrs
, &s2_prot
,
921 page_size
, fi
, &cacheattrs2
);
923 /* Combine the S1 and S2 perms. */
926 /* If S2 fails, return early. */

            /* Combine the S1 and S2 cache attributes. */
            if (arm_hcr_el2_eff(env) & HCR_DC) {
                /*
                 * HCR.DC forces the first stage attributes to
                 *  Normal Non-Shareable,
                 *  Inner Write-Back Read-Allocate Write-Allocate,
                 *  Outer Write-Back Read-Allocate Write-Allocate.
                 * Do not overwrite Tagged within attrs.
                 */
                if (cacheattrs->attrs != 0xf0) {
                    cacheattrs->attrs = 0xff;
                }
                cacheattrs->shareability = 0;
            }
            *cacheattrs = combine_cacheattrs(env, *cacheattrs, cacheattrs2);
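
            /*
             * With Secure EL2, VSTCR.SW and VTCR.NSW select the PA space
             * used for stage 2 walks of the secure and non-secure IPA
             * spaces, while SA/NSA additionally force the output PA space
             * non-secure; the result only stays secure if all the
             * relevant bits are clear.
             */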
            /* Check if IPA translates to secure or non-secure PA space. */
            if (arm_is_secure_below_el3(env)) {
                if (ipa_secure) {
                    attrs->secure =
                        !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
                } else {
                    attrs->secure =
                        !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
                          || (env->cp15.vstcr_el2.raw_tcr
                              & (VSTCR_SA | VSTCR_SW)));
                }
            }
            return 0;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /*
     * The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /*
     * Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        uint64_t hcr;
        uint8_t memattr;

        /*
         * MMU disabled. S1 addresses within aa64 translation regimes are
         * still checked for bounds -- see AArch64.TranslateAddressS1Off.
         */
        if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
            int r_el = regime_el(env, mmu_idx);
            if (arm_el_is_aa64(env, r_el)) {
                int pamax = arm_pamax(env_archcpu(env));
                uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
                int addrtop, tbi;

                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
                if (access_type == MMU_INST_FETCH) {
                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
                }
                tbi = (tbi >> extract64(address, 55, 1)) & 1;
                addrtop = (tbi ? 55 : 63);

                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
                    fi->type = ARMFault_AddressSize;
                    fi->level = 0;
                    fi->stage2 = false;
                    return 1;
                }

                /*
                 * When TBI is disabled, we've just validated that all of the
                 * bits above PAMax are zero, so logically we only need to
                 * clear the top byte for TBI. But it's clearer to follow
                 * the pseudocode set of addrdesc.paddress.
                 */
                address = extract64(address, 0, 52);
            }
        }
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;

        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
        hcr = arm_hcr_el2_eff(env);
        cacheattrs->shareability = 0;
        cacheattrs->is_s2_format = false;
        if (hcr & HCR_DC) {
            if (hcr & HCR_DCT) {
                memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
            } else {
                memattr = 0xff;  /* Normal, WB, RWA */
            }
        } else if (access_type == MMU_INST_FETCH) {
            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
                memattr = 0xee;  /* Normal, WT, RA, NT */
            } else {
                memattr = 0x44;  /* Normal, NC, No */
            }
            cacheattrs->shareability = 2; /* outer sharable */
        } else {
            memattr = 0x00;      /* Device, nGnRnE */
        }
        cacheattrs->attrs = memattr;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}