target/arm: Move pmsav7_use_background_region to ptw.c
[qemu.git] / target/arm/ptw.c

/*
 * ARM page table walking.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/range.h"
#include "cpu.h"
#include "internals.h"
#include "ptw.h"

static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault. */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section. */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry. */
        if (type == 1) {
            /* Coarse pagetable. */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable. */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /*
                     * UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            g_assert_not_reached();
        }
    }
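    /*
     * The v5 short-descriptor format has no execute-never bit, so any
     * page with read permission is also executable.
     */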
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault. */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    ARMCPU *cpu = env_archcpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section. */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection. */
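            /*
             * Descriptor bits [23:20] and [8:5] supply PA[35:32] and
             * PA[39:36], so a 16MB supersection can map up to a
             * 40-bit physical address.
             */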
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section. */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
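        /*
         * For a section descriptor: AP[1:0] is bits [11:10], AP[2] is
         * bit 15, XN is bit 4, PXN is bit 0 and NS is bit 19.
         */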
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry. */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            g_assert_not_reached();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
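        /*
         * With SCTLR.AFE set (the ARMv6K simplified access permissions
         * model), AP[0] is the Access Flag and only AP[2:1] encode the
         * permissions.
         */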
        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit. */
            if ((ap & 1) == 0) {
                /* Access flag fault. */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault. */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
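    /*
     * Scan for a region match from the highest-numbered region down;
     * where regions overlap, the higher-numbered region wins.
     */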
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
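    /*
     * PMSAv5 access permissions, four bits per region:
     *  0: no access, 1: privileged RW only, 2: privileged RW with
     *  user RO, 3: RW, 5: privileged RO only, 6: RO; any other
     *  value is reserved and faults.
     */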
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}

static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
                                         int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                                         bool is_user)
{
    /*
     * Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}

static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /*
         * MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
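            /*
             * DRSR.Rsize encodes the region size as 2^(Rsize + 1)
             * bytes; rsize is incremented below so that it holds the
             * log2 of the region size.
             */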
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /*
                     * This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }
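            /*
             * PMSAv7 AP[2:0] encoding: 0 no access, 1 privileged RW,
             * 2 privileged RW with user RO, 3 full RW, 5 privileged RO,
             * 6 RO; 7 is RO on M profile but reserved on R profile.
             */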
            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /*
     * Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /*
     * Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /*
             * Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }
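            /*
             * The region matched; if it does not cover the whole
             * TARGET_PAGE containing the address, flag the lookup as a
             * subpage so the resulting TLB entry covers only this access.
             */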
            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /*
                 * Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
        bool pxn = false;

        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
            pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
        }

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn && !(pxn && !is_user)) {
            *prot |= PAGE_EXEC;
        }
        /*
         * We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}

static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /*
             * Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /*
             * For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /*
                 * NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}

/**
 * get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);

    if (mmu_idx != s1_mmu_idx) {
        /*
         * Call ourselves recursively to do the stage 1 and then stage 2
         * translations if mmu_idx is a two-stage regime.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            bool ipa_secure;
            ARMCacheAttrs cacheattrs2 = {};
            ARMMMUIdx s2_mmu_idx;
            bool is_el0;

            ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
                                attrs, prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early. */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
                *phys_ptr = ipa;
                return ret;
            }

            ipa_secure = attrs->secure;
            if (arm_is_secure_below_el3(env)) {
                if (ipa_secure) {
                    attrs->secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
                } else {
                    attrs->secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
                }
            } else {
                assert(!ipa_secure);
            }

            s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
            is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;

            /* S1 is done. Now do S2 translation. */
            ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi, &cacheattrs2);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms. */
            *prot &= s2_prot;

            /* If S2 fails, return early. */
            if (ret) {
                return ret;
            }

            /* Combine the S1 and S2 cache attributes. */
            if (arm_hcr_el2_eff(env) & HCR_DC) {
                /*
                 * HCR.DC forces the first stage attributes to
                 *  Normal Non-Shareable,
                 *  Inner Write-Back Read-Allocate Write-Allocate,
                 *  Outer Write-Back Read-Allocate Write-Allocate.
                 * Do not overwrite Tagged within attrs.
                 */
                if (cacheattrs->attrs != 0xf0) {
                    cacheattrs->attrs = 0xff;
                }
                cacheattrs->shareability = 0;
            }
            *cacheattrs = combine_cacheattrs(env, *cacheattrs, cacheattrs2);

            /* Check if IPA translates to secure or non-secure PA space. */
            if (arm_is_secure_below_el3(env)) {
                if (ipa_secure) {
                    attrs->secure =
                        !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
                } else {
                    attrs->secure =
                        !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
                        || (env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW)));
                }
            }
            return 0;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /*
     * The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /*
     * Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        uint64_t hcr;
        uint8_t memattr;

        /*
         * MMU disabled. S1 addresses within aa64 translation regimes are
         * still checked for bounds -- see AArch64.TranslateAddressS1Off.
         */
        if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
            int r_el = regime_el(env, mmu_idx);
            if (arm_el_is_aa64(env, r_el)) {
                int pamax = arm_pamax(env_archcpu(env));
                uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
                int addrtop, tbi;

                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
                if (access_type == MMU_INST_FETCH) {
                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
                }
                tbi = (tbi >> extract64(address, 55, 1)) & 1;
                addrtop = (tbi ? 55 : 63);

                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
                    fi->type = ARMFault_AddressSize;
                    fi->level = 0;
                    fi->stage2 = false;
                    return 1;
                }

                /*
                 * When TBI is disabled, we've just validated that all of the
                 * bits above PAMax are zero, so logically we only need to
                 * clear the top byte for TBI. But it's clearer to follow
                 * the pseudocode set of addrdesc.paddress.
                 */
                address = extract64(address, 0, 52);
            }
        }
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;

        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
        hcr = arm_hcr_el2_eff(env);
        cacheattrs->shareability = 0;
        cacheattrs->is_s2_format = false;
        if (hcr & HCR_DC) {
            if (hcr & HCR_DCT) {
                memattr = 0xf0; /* Tagged, Normal, WB, RWA */
            } else {
                memattr = 0xff; /* Normal, WB, RWA */
            }
        } else if (access_type == MMU_INST_FETCH) {
            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
                memattr = 0xee; /* Normal, WT, RA, NT */
            } else {
                memattr = 0x44; /* Normal, NC, No */
            }
            cacheattrs->shareability = 2; /* outer shareable */
        } else {
            memattr = 0x00; /* Device, nGnRnE */
        }
        cacheattrs->attrs = memattr;
        return 0;
    }

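    /*
     * Dispatch on descriptor format: the LPAE long format if the regime
     * uses it, otherwise the ARMv6 short format when SCTLR.XP is set,
     * and the legacy v5 format (with subpages) when it is not.
     */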
    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}