target/arm/ptw.c
/*
 * ARM page table walking.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/range.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "cpu.h"
#include "internals.h"
#include "idau.h"
#ifdef CONFIG_TCG
# include "tcg/oversized-guest.h"
#endif

typedef struct S1Translate {
    ARMMMUIdx in_mmu_idx;
    ARMMMUIdx in_ptw_idx;
    ARMSecuritySpace in_space;
    bool in_secure;
    bool in_debug;
    /*
     * If this is stage 2 of a stage 1+2 page table walk, then this must
     * be true if stage 1 is an EL0 access; otherwise this is ignored.
     * Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}.
     */
    bool in_s1_is_el0;
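    /*
     * The out_* fields below are filled in by S1_ptw_translate() and
     * describe where and how the descriptor loads for this walk are
     * to be performed.
     */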
    bool out_secure;
    bool out_rw;
    bool out_be;
    ARMSecuritySpace out_space;
    hwaddr out_virt;
    hwaddr out_phys;
    void *out_host;
} S1Translate;

static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
                                target_ulong address,
                                MMUAccessType access_type,
                                GetPhysAddrResult *result,
                                ARMMMUFaultInfo *fi);

static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
                              target_ulong address,
                              MMUAccessType access_type,
                              GetPhysAddrResult *result,
                              ARMMMUFaultInfo *fi);

/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
static const uint8_t pamax_map[] = {
    [0] = 32,
    [1] = 36,
    [2] = 40,
    [3] = 42,
    [4] = 44,
    [5] = 48,
    [6] = 52,
};
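/* e.g. a PARANGE/PS value of 5 corresponds to a 48-bit physical address size. */
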
/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
unsigned int arm_pamax(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        unsigned int parange =
            FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

        /*
         * id_aa64mmfr0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        assert(parange < ARRAY_SIZE(pamax_map));
        return pamax_map[parange];
    }

    /*
     * In machvirt_init, we call arm_pamax on a cpu that is not fully
     * initialized, so we can't rely on the propagation done in realize.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) ||
        arm_feature(&cpu->env, ARM_FEATURE_V7VE)) {
        /* v7 with LPAE */
        return 40;
    }
    /* Anything else */
    return 32;
}

/*
 * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
 */
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        return mmu_idx;
    }
}

ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}

/*
 * Return where we should do ptw loads from for a stage 2 walk.
 * This depends on whether the address we are looking up is a
 * Secure IPA or a NonSecure IPA, which we know from whether this is
 * Stage2 or Stage2_S.
 * If this is the Secure EL1&0 regime we need to check the NSW and SW bits.
 */
static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx)
{
    bool s2walk_secure;

    /*
     * We're OK to check the current state of the CPU here because
     * (1) we always invalidate all TLBs when the SCR_EL3.NS bit changes
     * (2) there's no way to do a lookup that cares about Stage 2 for a
     * different security state to the current one for AArch64, and AArch32
     * never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do
     * an NS stage 1+2 lookup while the NS bit is 0.)
     */
    if (!arm_is_secure_below_el3(env) || !arm_el_is_aa64(env, 3)) {
        return ARMMMUIdx_Phys_NS;
    }
    if (stage2idx == ARMMMUIdx_Stage2_S) {
        s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
    } else {
        s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
    }
    return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
}
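
/*
 * Note that the result above is always one of the Phys_* indexes:
 * the descriptor fetches for a stage 2 walk are made directly from
 * the selected (Secure or NonSecure) physical address space and are
 * not themselves translated any further.
 */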

static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

/* Return the TTBR associated with this translation regime */
static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        return env->cp15.vsttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

/* Return true if the specified stage of address translation is disabled */
static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
                                        bool is_secure)
{
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[is_secure] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /*
             * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    hcr_el2 = arm_hcr_el2_eff_secstate(env, is_secure);

    switch (mmu_idx) {
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
        /* HCR.DC means HCR.VM behaves as 1 */
        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;

    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
        if (hcr_el2 & HCR_TGE) {
            return true;
        }
        break;

    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        if (hcr_el2 & HCR_DC) {
            return true;
        }
        break;

    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E3:
        break;

    case ARMMMUIdx_Phys_S:
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_Phys_Root:
    case ARMMMUIdx_Phys_Realm:
        /* No translation for physical address spaces. */
        return true;

    default:
        g_assert_not_reached();
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}

static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
                                     ARMSecuritySpace pspace,
                                     ARMMMUFaultInfo *fi)
{
    MemTxAttrs attrs = {
        .secure = true,
        .space = ARMSS_Root,
    };
    ARMCPU *cpu = env_archcpu(env);
    uint64_t gpccr = env->cp15.gpccr_el3;
    unsigned pps, pgs, l0gptsz, level = 0;
    uint64_t tableaddr, pps_mask, align, entry, index;
    AddressSpace *as;
    MemTxResult result;
    int gpi;

    if (!FIELD_EX64(gpccr, GPCCR, GPC)) {
        return true;
    }

    /*
     * GPC Priority 1 (R_GMGRR):
     * R_JWCSM: If the configuration of GPCCR_EL3 is invalid,
     * the access fails as GPT walk fault at level 0.
     */

    /*
     * Configuration of PPS to a value exceeding the implemented
     * physical address size is invalid.
     */
    pps = FIELD_EX64(gpccr, GPCCR, PPS);
    if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) {
        goto fault_walk;
    }
    pps = pamax_map[pps];
    pps_mask = MAKE_64BIT_MASK(0, pps);

    switch (FIELD_EX64(gpccr, GPCCR, SH)) {
    case 0b10: /* outer shareable */
        break;
    case 0b00: /* non-shareable */
    case 0b11: /* inner shareable */
        /* Inner and Outer non-cacheable requires Outer shareable. */
        if (FIELD_EX64(gpccr, GPCCR, ORGN) == 0 &&
            FIELD_EX64(gpccr, GPCCR, IRGN) == 0) {
            goto fault_walk;
        }
        break;
    default: /* reserved */
        goto fault_walk;
    }

    switch (FIELD_EX64(gpccr, GPCCR, PGS)) {
    case 0b00: /* 4KB */
        pgs = 12;
        break;
    case 0b01: /* 64KB */
        pgs = 16;
        break;
    case 0b10: /* 16KB */
        pgs = 14;
        break;
    default: /* reserved */
        goto fault_walk;
    }

    /* Note this field is read-only and fixed at reset. */
    l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ);
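    /*
     * For example, L0GPTSZ == 0 means each level 0 GPT entry covers
     * a 1GB (2^30 byte) region of physical address space.
     */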

    /*
     * GPC Priority 2: Secure, Realm or Root address exceeds PPS.
     * R_CPDSB: A NonSecure physical address input exceeding PPS
     * does not experience any fault.
     */
    if (paddress & ~pps_mask) {
        if (pspace == ARMSS_NonSecure) {
            return true;
        }
        goto fault_size;
    }

    /* GPC Priority 3: the base address of GPTBR_EL3 exceeds PPS. */
    tableaddr = env->cp15.gptbr_el3 << 12;
    if (tableaddr & ~pps_mask) {
        goto fault_size;
    }

    /*
     * BADDR is aligned per a function of PPS and L0GPTSZ.
     * These bits of GPTBR_EL3 are RES0, but are not a configuration error,
     * unlike the RES0 bits of the GPT entries (R_XNKFZ).
     */
    align = MAX(pps - l0gptsz + 3, 12);
    align = MAKE_64BIT_MASK(0, align);
    tableaddr &= ~align;

    as = arm_addressspace(env_cpu(env), attrs);

    /* Level 0 lookup. */
    index = extract64(paddress, l0gptsz, pps - l0gptsz);
    tableaddr += index * 8;
    entry = address_space_ldq_le(as, tableaddr, attrs, &result);
    if (result != MEMTX_OK) {
        goto fault_eabt;
    }

    switch (extract32(entry, 0, 4)) {
    case 1: /* block descriptor */
        if (entry >> 8) {
            goto fault_walk; /* RES0 bits not 0 */
        }
        gpi = extract32(entry, 4, 4);
        goto found;
    case 3: /* table descriptor */
        tableaddr = entry & ~0xf;
        align = MAX(l0gptsz - pgs - 1, 12);
        align = MAKE_64BIT_MASK(0, align);
        if (tableaddr & (~pps_mask | align)) {
            goto fault_walk; /* RES0 bits not 0 */
        }
        break;
    default: /* invalid */
        goto fault_walk;
    }

    /* Level 1 lookup */
    level = 1;
    index = extract64(paddress, pgs + 4, l0gptsz - pgs - 4);
    tableaddr += index * 8;
    entry = address_space_ldq_le(as, tableaddr, attrs, &result);
    if (result != MEMTX_OK) {
        goto fault_eabt;
    }

    switch (extract32(entry, 0, 4)) {
    case 1: /* contiguous descriptor */
        if (entry >> 10) {
            goto fault_walk; /* RES0 bits not 0 */
        }
        /*
         * Because the softmmu tlb only works on units of TARGET_PAGE_SIZE,
         * and because we cannot invalidate by pa, and thus will always
         * flush entire tlbs, we don't actually care about the range here
         * and can simply extract the GPI as the result.
         */
        if (extract32(entry, 8, 2) == 0) {
            goto fault_walk; /* reserved contig */
        }
        gpi = extract32(entry, 4, 4);
        break;
    default:
        index = extract64(paddress, pgs, 4);
        gpi = extract64(entry, index * 4, 4);
        break;
    }

 found:
    switch (gpi) {
    case 0b0000: /* no access */
        break;
    case 0b1111: /* all access */
        return true;
    case 0b1000:
    case 0b1001:
    case 0b1010:
    case 0b1011:
        if (pspace == (gpi & 3)) {
            return true;
        }
        break;
    default:
        goto fault_walk; /* reserved */
    }

    fi->gpcf = GPCF_Fail;
    goto fault_common;
 fault_eabt:
    fi->gpcf = GPCF_EABT;
    goto fault_common;
 fault_size:
    fi->gpcf = GPCF_AddressSize;
    goto fault_common;
 fault_walk:
    fi->gpcf = GPCF_Walk;
 fault_common:
    fi->level = level;
    fi->paddr = paddress;
    fi->paddr_space = pspace;
    return false;
}

static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
{
    /*
     * For an S1 page table walk, the stage 1 attributes are always
     * some form of "this is Normal memory". The combined S1+S2
     * attributes are therefore only Device if stage 2 specifies Device.
     * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
     * ie when cacheattrs.attrs bits [3:2] are 0b00.
     * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
     * when cacheattrs.attrs bit [2] is 0.
     */
    if (hcr & HCR_FWB) {
        return (attrs & 0x4) == 0;
    } else {
        return (attrs & 0xc) == 0;
    }
}

/* Translate a S1 pagetable walk through S2 if needed. */
static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                             hwaddr addr, ARMMMUFaultInfo *fi)
{
    ARMSecuritySpace space = ptw->in_space;
    bool is_secure = ptw->in_secure;
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
    uint8_t pte_attrs;

    ptw->out_virt = addr;

    if (unlikely(ptw->in_debug)) {
        /*
         * From gdbstub, do not use softmmu so that we don't modify the
         * state of the cpu at all, including softmmu tlb contents.
         */
        S1Translate s2ptw = {
            .in_mmu_idx = s2_mmu_idx,
            .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
            .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
            .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure
                         : space == ARMSS_Realm ? ARMSS_Realm
                         : ARMSS_NonSecure),
            .in_debug = true,
        };
        GetPhysAddrResult s2 = { };

        if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, &s2, fi)) {
            goto fail;
        }

        ptw->out_phys = s2.f.phys_addr;
        pte_attrs = s2.cacheattrs.attrs;
        ptw->out_host = NULL;
        ptw->out_rw = false;
        ptw->out_secure = s2.f.attrs.secure;
        ptw->out_space = s2.f.attrs.space;
    } else {
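        /*
         * Normal (non-debug) walks go via the softmmu TLB, so the
         * translation of the descriptor address is cached and we may
         * get back a host pointer for direct access to guest RAM.
         */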
#ifdef CONFIG_TCG
        CPUTLBEntryFull *full;
        int flags;

        env->tlb_fi = fi;
        flags = probe_access_full(env, addr, 0, MMU_DATA_LOAD,
                                  arm_to_core_mmu_idx(s2_mmu_idx),
                                  true, &ptw->out_host, &full, 0);
        env->tlb_fi = NULL;

        if (unlikely(flags & TLB_INVALID_MASK)) {
            goto fail;
        }
        ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
        ptw->out_rw = full->prot & PAGE_WRITE;
        pte_attrs = full->pte_attrs;
        ptw->out_secure = full->attrs.secure;
        ptw->out_space = full->attrs.space;
#else
        g_assert_not_reached();
#endif
    }

    if (regime_is_stage2(s2_mmu_idx)) {
        uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);

        if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
            /*
             * PTW set and S1 walk touched S2 Device memory:
             * generate Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !is_secure;
            return false;
        }
    }

    ptw->out_be = regime_translation_big_endian(env, mmu_idx);
    return true;

 fail:
    assert(fi->type != ARMFault_None);
    if (fi->type == ARMFault_GPCFOnOutput) {
        fi->type = ARMFault_GPCFOnWalk;
    }
    fi->s2addr = addr;
    fi->stage2 = true;
    fi->s1ptw = true;
    fi->s1ns = !is_secure;
    return false;
}
542 /* All loads done in the course of a page table walk go through here. */
543 static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw,
544 ARMMMUFaultInfo *fi)
546 CPUState *cs = env_cpu(env);
547 void *host = ptw->out_host;
548 uint32_t data;
550 if (likely(host)) {
551 /* Page tables are in RAM, and we have the host address. */
552 data = qatomic_read((uint32_t *)host);
553 if (ptw->out_be) {
554 data = be32_to_cpu(data);
555 } else {
556 data = le32_to_cpu(data);
558 } else {
559 /* Page tables are in MMIO. */
560 MemTxAttrs attrs = {
561 .secure = ptw->out_secure,
562 .space = ptw->out_space,
564 AddressSpace *as = arm_addressspace(cs, attrs);
565 MemTxResult result = MEMTX_OK;
567 if (ptw->out_be) {
568 data = address_space_ldl_be(as, ptw->out_phys, attrs, &result);
569 } else {
570 data = address_space_ldl_le(as, ptw->out_phys, attrs, &result);
572 if (unlikely(result != MEMTX_OK)) {
573 fi->type = ARMFault_SyncExternalOnWalk;
574 fi->ea = arm_extabort_type(result);
575 return 0;
578 return data;
581 static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw,
582 ARMMMUFaultInfo *fi)
584 CPUState *cs = env_cpu(env);
585 void *host = ptw->out_host;
586 uint64_t data;
588 if (likely(host)) {
589 /* Page tables are in RAM, and we have the host address. */
590 #ifdef CONFIG_ATOMIC64
591 data = qatomic_read__nocheck((uint64_t *)host);
592 if (ptw->out_be) {
593 data = be64_to_cpu(data);
594 } else {
595 data = le64_to_cpu(data);
597 #else
598 if (ptw->out_be) {
599 data = ldq_be_p(host);
600 } else {
601 data = ldq_le_p(host);
603 #endif
604 } else {
605 /* Page tables are in MMIO. */
606 MemTxAttrs attrs = {
607 .secure = ptw->out_secure,
608 .space = ptw->out_space,
610 AddressSpace *as = arm_addressspace(cs, attrs);
611 MemTxResult result = MEMTX_OK;
613 if (ptw->out_be) {
614 data = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
615 } else {
616 data = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
618 if (unlikely(result != MEMTX_OK)) {
619 fi->type = ARMFault_SyncExternalOnWalk;
620 fi->ea = arm_extabort_type(result);
621 return 0;
624 return data;
627 static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
628 uint64_t new_val, S1Translate *ptw,
629 ARMMMUFaultInfo *fi)
631 #ifdef TARGET_AARCH64
632 uint64_t cur_val;
633 void *host = ptw->out_host;
635 if (unlikely(!host)) {
636 fi->type = ARMFault_UnsuppAtomicUpdate;
637 fi->s1ptw = true;
638 return 0;
642 * Raising a stage2 Protection fault for an atomic update to a read-only
643 * page is delayed until it is certain that there is a change to make.
645 if (unlikely(!ptw->out_rw)) {
646 int flags;
647 void *discard;
649 env->tlb_fi = fi;
650 flags = probe_access_flags(env, ptw->out_virt, 0, MMU_DATA_STORE,
651 arm_to_core_mmu_idx(ptw->in_ptw_idx),
652 true, &discard, 0);
653 env->tlb_fi = NULL;
655 if (unlikely(flags & TLB_INVALID_MASK)) {
656 assert(fi->type != ARMFault_None);
657 fi->s2addr = ptw->out_virt;
658 fi->stage2 = true;
659 fi->s1ptw = true;
660 fi->s1ns = !ptw->in_secure;
661 return 0;
664 /* In case CAS mismatches and we loop, remember writability. */
665 ptw->out_rw = true;
668 #ifdef CONFIG_ATOMIC64
669 if (ptw->out_be) {
670 old_val = cpu_to_be64(old_val);
671 new_val = cpu_to_be64(new_val);
672 cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
673 cur_val = be64_to_cpu(cur_val);
674 } else {
675 old_val = cpu_to_le64(old_val);
676 new_val = cpu_to_le64(new_val);
677 cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
678 cur_val = le64_to_cpu(cur_val);
680 #else
682 * We can't support the full 64-bit atomic cmpxchg on the host.
683 * Because this is only used for FEAT_HAFDBS, which is only for AA64,
684 * we know that TCG_OVERSIZED_GUEST is set, which means that we are
685 * running in round-robin mode and could only race with dma i/o.
687 #if !TCG_OVERSIZED_GUEST
688 # error "Unexpected configuration"
689 #endif
690 bool locked = qemu_mutex_iothread_locked();
691 if (!locked) {
692 qemu_mutex_lock_iothread();
694 if (ptw->out_be) {
695 cur_val = ldq_be_p(host);
696 if (cur_val == old_val) {
697 stq_be_p(host, new_val);
699 } else {
700 cur_val = ldq_le_p(host);
701 if (cur_val == old_val) {
702 stq_le_p(host, new_val);
705 if (!locked) {
706 qemu_mutex_unlock_iothread();
708 #endif
710 return cur_val;
711 #else
712 /* AArch32 does not have FEAT_HADFS. */
713 g_assert_not_reached();
714 #endif
717 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
718 uint32_t *table, uint32_t address)
720 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
721 uint64_t tcr = regime_tcr(env, mmu_idx);
722 int maskshift = extract32(tcr, 0, 3);
723 uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
724 uint32_t base_mask;
726 if (address & mask) {
727 if (tcr & TTBCR_PD1) {
728 /* Translation table walk disabled for TTBR1 */
729 return false;
731 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
732 } else {
733 if (tcr & TTBCR_PD0) {
734 /* Translation table walk disabled for TTBR0 */
735 return false;
737 base_mask = ~((uint32_t)0x3fffu >> maskshift);
738 *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
740 *table |= (address >> 18) & 0x3ffc;
741 return true;
745 * Translate section/page access permissions to page R/W protection flags
746 * @env: CPUARMState
747 * @mmu_idx: MMU index indicating required translation regime
748 * @ap: The 3-bit access permissions (AP[2:0])
749 * @domain_prot: The 2-bit domain access permissions
750 * @is_user: TRUE if accessing from PL0
752 static int ap_to_rw_prot_is_user(CPUARMState *env, ARMMMUIdx mmu_idx,
753 int ap, int domain_prot, bool is_user)
755 if (domain_prot == 3) {
756 return PAGE_READ | PAGE_WRITE;
759 switch (ap) {
760 case 0:
761 if (arm_feature(env, ARM_FEATURE_V7)) {
762 return 0;
764 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
765 case SCTLR_S:
766 return is_user ? 0 : PAGE_READ;
767 case SCTLR_R:
768 return PAGE_READ;
769 default:
770 return 0;
772 case 1:
773 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
774 case 2:
775 if (is_user) {
776 return PAGE_READ;
777 } else {
778 return PAGE_READ | PAGE_WRITE;
780 case 3:
781 return PAGE_READ | PAGE_WRITE;
782 case 4: /* Reserved. */
783 return 0;
784 case 5:
785 return is_user ? 0 : PAGE_READ;
786 case 6:
787 return PAGE_READ;
788 case 7:
789 if (!arm_feature(env, ARM_FEATURE_V6K)) {
790 return 0;
792 return PAGE_READ;
793 default:
794 g_assert_not_reached();
799 * Translate section/page access permissions to page R/W protection flags
800 * @env: CPUARMState
801 * @mmu_idx: MMU index indicating required translation regime
802 * @ap: The 3-bit access permissions (AP[2:0])
803 * @domain_prot: The 2-bit domain access permissions
805 static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
806 int ap, int domain_prot)
808 return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot,
809 regime_is_user(env, mmu_idx));
813 * Translate section/page access permissions to page R/W protection flags.
814 * @ap: The 2-bit simple AP (AP[2:1])
815 * @is_user: TRUE if accessing from PL0
817 static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
819 switch (ap) {
820 case 0:
821 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
822 case 1:
823 return PAGE_READ | PAGE_WRITE;
824 case 2:
825 return is_user ? 0 : PAGE_READ;
826 case 3:
827 return PAGE_READ;
828 default:
829 g_assert_not_reached();
833 static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
835 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
838 static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
839 uint32_t address, MMUAccessType access_type,
840 GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
842 int level = 1;
843 uint32_t table;
844 uint32_t desc;
845 int type;
846 int ap;
847 int domain = 0;
848 int domain_prot;
849 hwaddr phys_addr;
850 uint32_t dacr;
852 /* Pagetable walk. */
853 /* Lookup l1 descriptor. */
854 if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) {
855 /* Section translation fault if page walk is disabled by PD0 or PD1 */
856 fi->type = ARMFault_Translation;
857 goto do_fault;
859 if (!S1_ptw_translate(env, ptw, table, fi)) {
860 goto do_fault;
862 desc = arm_ldl_ptw(env, ptw, fi);
863 if (fi->type != ARMFault_None) {
864 goto do_fault;
866 type = (desc & 3);
867 domain = (desc >> 5) & 0x0f;
868 if (regime_el(env, ptw->in_mmu_idx) == 1) {
869 dacr = env->cp15.dacr_ns;
870 } else {
871 dacr = env->cp15.dacr_s;
873 domain_prot = (dacr >> (domain * 2)) & 3;
874 if (type == 0) {
875 /* Section translation fault. */
876 fi->type = ARMFault_Translation;
877 goto do_fault;
879 if (type != 2) {
880 level = 2;
882 if (domain_prot == 0 || domain_prot == 2) {
883 fi->type = ARMFault_Domain;
884 goto do_fault;
886 if (type == 2) {
887 /* 1Mb section. */
888 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
889 ap = (desc >> 10) & 3;
890 result->f.lg_page_size = 20; /* 1MB */
891 } else {
892 /* Lookup l2 entry. */
893 if (type == 1) {
894 /* Coarse pagetable. */
895 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
896 } else {
897 /* Fine pagetable. */
898 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
900 if (!S1_ptw_translate(env, ptw, table, fi)) {
901 goto do_fault;
903 desc = arm_ldl_ptw(env, ptw, fi);
904 if (fi->type != ARMFault_None) {
905 goto do_fault;
907 switch (desc & 3) {
908 case 0: /* Page translation fault. */
909 fi->type = ARMFault_Translation;
910 goto do_fault;
911 case 1: /* 64k page. */
912 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
913 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
914 result->f.lg_page_size = 16;
915 break;
916 case 2: /* 4k page. */
917 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
918 ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
919 result->f.lg_page_size = 12;
920 break;
921 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
922 if (type == 1) {
923 /* ARMv6/XScale extended small page format */
924 if (arm_feature(env, ARM_FEATURE_XSCALE)
925 || arm_feature(env, ARM_FEATURE_V6)) {
926 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
927 result->f.lg_page_size = 12;
928 } else {
930 * UNPREDICTABLE in ARMv5; we choose to take a
931 * page translation fault.
933 fi->type = ARMFault_Translation;
934 goto do_fault;
936 } else {
937 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
938 result->f.lg_page_size = 10;
940 ap = (desc >> 4) & 3;
941 break;
942 default:
943 /* Never happens, but compiler isn't smart enough to tell. */
944 g_assert_not_reached();
947 result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot);
948 result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
949 if (!(result->f.prot & (1 << access_type))) {
950 /* Access permission fault. */
951 fi->type = ARMFault_Permission;
952 goto do_fault;
954 result->f.phys_addr = phys_addr;
955 return false;
956 do_fault:
957 fi->domain = domain;
958 fi->level = level;
959 return true;
962 static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
963 uint32_t address, MMUAccessType access_type,
964 GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
966 ARMCPU *cpu = env_archcpu(env);
967 ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
968 int level = 1;
969 uint32_t table;
970 uint32_t desc;
971 uint32_t xn;
972 uint32_t pxn = 0;
973 int type;
974 int ap;
975 int domain = 0;
976 int domain_prot;
977 hwaddr phys_addr;
978 uint32_t dacr;
979 bool ns;
980 int user_prot;
982 /* Pagetable walk. */
983 /* Lookup l1 descriptor. */
984 if (!get_level1_table_address(env, mmu_idx, &table, address)) {
985 /* Section translation fault if page walk is disabled by PD0 or PD1 */
986 fi->type = ARMFault_Translation;
987 goto do_fault;
989 if (!S1_ptw_translate(env, ptw, table, fi)) {
990 goto do_fault;
992 desc = arm_ldl_ptw(env, ptw, fi);
993 if (fi->type != ARMFault_None) {
994 goto do_fault;
996 type = (desc & 3);
997 if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
998 /* Section translation fault, or attempt to use the encoding
999 * which is Reserved on implementations without PXN.
1001 fi->type = ARMFault_Translation;
1002 goto do_fault;
1004 if ((type == 1) || !(desc & (1 << 18))) {
1005 /* Page or Section. */
1006 domain = (desc >> 5) & 0x0f;
1008 if (regime_el(env, mmu_idx) == 1) {
1009 dacr = env->cp15.dacr_ns;
1010 } else {
1011 dacr = env->cp15.dacr_s;
1013 if (type == 1) {
1014 level = 2;
1016 domain_prot = (dacr >> (domain * 2)) & 3;
1017 if (domain_prot == 0 || domain_prot == 2) {
1018 /* Section or Page domain fault */
1019 fi->type = ARMFault_Domain;
1020 goto do_fault;
1022 if (type != 1) {
1023 if (desc & (1 << 18)) {
1024 /* Supersection. */
1025 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
1026 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
1027 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
1028 result->f.lg_page_size = 24; /* 16MB */
1029 } else {
1030 /* Section. */
1031 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1032 result->f.lg_page_size = 20; /* 1MB */
1034 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
1035 xn = desc & (1 << 4);
1036 pxn = desc & 1;
1037 ns = extract32(desc, 19, 1);
1038 } else {
1039 if (cpu_isar_feature(aa32_pxn, cpu)) {
1040 pxn = (desc >> 2) & 1;
1042 ns = extract32(desc, 3, 1);
1043 /* Lookup l2 entry. */
1044 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
1045 if (!S1_ptw_translate(env, ptw, table, fi)) {
1046 goto do_fault;
1048 desc = arm_ldl_ptw(env, ptw, fi);
1049 if (fi->type != ARMFault_None) {
1050 goto do_fault;
1052 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
1053 switch (desc & 3) {
1054 case 0: /* Page translation fault. */
1055 fi->type = ARMFault_Translation;
1056 goto do_fault;
1057 case 1: /* 64k page. */
1058 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1059 xn = desc & (1 << 15);
1060 result->f.lg_page_size = 16;
1061 break;
1062 case 2: case 3: /* 4k page. */
1063 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1064 xn = desc & 1;
1065 result->f.lg_page_size = 12;
1066 break;
1067 default:
1068 /* Never happens, but compiler isn't smart enough to tell. */
1069 g_assert_not_reached();
1072 if (domain_prot == 3) {
1073 result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1074 } else {
1075 if (pxn && !regime_is_user(env, mmu_idx)) {
1076 xn = 1;
1078 if (xn && access_type == MMU_INST_FETCH) {
1079 fi->type = ARMFault_Permission;
1080 goto do_fault;
1083 if (arm_feature(env, ARM_FEATURE_V6K) &&
1084 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
1085 /* The simplified model uses AP[0] as an access control bit. */
1086 if ((ap & 1) == 0) {
1087 /* Access flag fault. */
1088 fi->type = ARMFault_AccessFlag;
1089 goto do_fault;
1091 result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
1092 user_prot = simple_ap_to_rw_prot_is_user(ap >> 1, 1);
1093 } else {
1094 result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
1095 user_prot = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1);
1097 if (result->f.prot && !xn) {
1098 result->f.prot |= PAGE_EXEC;
1100 if (!(result->f.prot & (1 << access_type))) {
1101 /* Access permission fault. */
1102 fi->type = ARMFault_Permission;
1103 goto do_fault;
1105 if (regime_is_pan(env, mmu_idx) &&
1106 !regime_is_user(env, mmu_idx) &&
1107 user_prot &&
1108 access_type != MMU_INST_FETCH) {
1109 /* Privileged Access Never fault */
1110 fi->type = ARMFault_Permission;
1111 goto do_fault;
1114 if (ns) {
1115 /* The NS bit will (as required by the architecture) have no effect if
1116 * the CPU doesn't support TZ or this is a non-secure translation
1117 * regime, because the attribute will already be non-secure.
1119 result->f.attrs.secure = false;
1120 result->f.attrs.space = ARMSS_NonSecure;
1122 result->f.phys_addr = phys_addr;
1123 return false;
1124 do_fault:
1125 fi->domain = domain;
1126 fi->level = level;
1127 return true;
1131 * Translate S2 section/page access permissions to protection flags
1132 * @env: CPUARMState
1133 * @s2ap: The 2-bit stage2 access permissions (S2AP)
1134 * @xn: XN (execute-never) bits
1135 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
1137 static int get_S2prot_noexecute(int s2ap)
1139 int prot = 0;
1141 if (s2ap & 1) {
1142 prot |= PAGE_READ;
1144 if (s2ap & 2) {
1145 prot |= PAGE_WRITE;
1147 return prot;
1150 static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
1152 int prot = get_S2prot_noexecute(s2ap);
1154 if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
1155 switch (xn) {
1156 case 0:
1157 prot |= PAGE_EXEC;
1158 break;
1159 case 1:
1160 if (s1_is_el0) {
1161 prot |= PAGE_EXEC;
1163 break;
1164 case 2:
1165 break;
1166 case 3:
1167 if (!s1_is_el0) {
1168 prot |= PAGE_EXEC;
1170 break;
1171 default:
1172 g_assert_not_reached();
1174 } else {
1175 if (!extract32(xn, 1, 1)) {
1176 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
1177 prot |= PAGE_EXEC;
1181 return prot;
1185 * Translate section/page access permissions to protection flags
1186 * @env: CPUARMState
1187 * @mmu_idx: MMU index indicating required translation regime
1188 * @is_aa64: TRUE if AArch64
1189 * @ap: The 2-bit simple AP (AP[2:1])
1190 * @xn: XN (execute-never) bit
1191 * @pxn: PXN (privileged execute-never) bit
1192 * @in_pa: The original input pa space
1193 * @out_pa: The output pa space, modified by NSTable, NS, and NSE
1195 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
1196 int ap, int xn, int pxn,
1197 ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
1199 ARMCPU *cpu = env_archcpu(env);
1200 bool is_user = regime_is_user(env, mmu_idx);
1201 int prot_rw, user_rw;
1202 bool have_wxn;
1203 int wxn = 0;
1205 assert(!regime_is_stage2(mmu_idx));
1207 user_rw = simple_ap_to_rw_prot_is_user(ap, true);
1208 if (is_user) {
1209 prot_rw = user_rw;
1210 } else {
1212 * PAN controls can forbid data accesses but don't affect insn fetch.
1213 * Plain PAN forbids data accesses if EL0 has data permissions;
1214 * PAN3 forbids data accesses if EL0 has either data or exec perms.
1215 * Note that for AArch64 the 'user can exec' case is exactly !xn.
1216 * We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0
1217 * do not affect EPAN.
1219 if (user_rw && regime_is_pan(env, mmu_idx)) {
1220 prot_rw = 0;
1221 } else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 &&
1222 regime_is_pan(env, mmu_idx) &&
1223 (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) {
1224 prot_rw = 0;
1225 } else {
1226 prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
1230 if (in_pa != out_pa) {
1231 switch (in_pa) {
1232 case ARMSS_Root:
1234 * R_ZWRVD: permission fault for insn fetched from non-Root,
1235 * I_WWBFB: SIF has no effect in EL3.
1237 return prot_rw;
1238 case ARMSS_Realm:
1240 * R_PKTDS: permission fault for insn fetched from non-Realm,
1241 * for Realm EL2 or EL2&0. The corresponding fault for EL1&0
1242 * happens during any stage2 translation.
1244 switch (mmu_idx) {
1245 case ARMMMUIdx_E2:
1246 case ARMMMUIdx_E20_0:
1247 case ARMMMUIdx_E20_2:
1248 case ARMMMUIdx_E20_2_PAN:
1249 return prot_rw;
1250 default:
1251 break;
1253 break;
1254 case ARMSS_Secure:
1255 if (env->cp15.scr_el3 & SCR_SIF) {
1256 return prot_rw;
1258 break;
1259 default:
1260 /* Input NonSecure must have output NonSecure. */
1261 g_assert_not_reached();
1265 /* TODO have_wxn should be replaced with
1266 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
1267 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
1268 * compatible processors have EL2, which is required for [U]WXN.
1270 have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
1272 if (have_wxn) {
1273 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
1276 if (is_aa64) {
1277 if (regime_has_2_ranges(mmu_idx) && !is_user) {
1278 xn = pxn || (user_rw & PAGE_WRITE);
1280 } else if (arm_feature(env, ARM_FEATURE_V7)) {
1281 switch (regime_el(env, mmu_idx)) {
1282 case 1:
1283 case 3:
1284 if (is_user) {
1285 xn = xn || !(user_rw & PAGE_READ);
1286 } else {
1287 int uwxn = 0;
1288 if (have_wxn) {
1289 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
1291 xn = xn || !(prot_rw & PAGE_READ) || pxn ||
1292 (uwxn && (user_rw & PAGE_WRITE));
1294 break;
1295 case 2:
1296 break;
1298 } else {
1299 xn = wxn = 0;
1302 if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
1303 return prot_rw;
1305 return prot_rw | PAGE_EXEC;
1308 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
1309 ARMMMUIdx mmu_idx)
1311 uint64_t tcr = regime_tcr(env, mmu_idx);
1312 uint32_t el = regime_el(env, mmu_idx);
1313 int select, tsz;
1314 bool epd, hpd;
1316 assert(mmu_idx != ARMMMUIdx_Stage2_S);
1318 if (mmu_idx == ARMMMUIdx_Stage2) {
1319 /* VTCR */
1320 bool sext = extract32(tcr, 4, 1);
1321 bool sign = extract32(tcr, 3, 1);
1324 * If the sign-extend bit is not the same as t0sz[3], the result
1325 * is unpredictable. Flag this as a guest error.
1327 if (sign != sext) {
1328 qemu_log_mask(LOG_GUEST_ERROR,
1329 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
1331 tsz = sextract32(tcr, 0, 4) + 8;
1332 select = 0;
1333 hpd = false;
1334 epd = false;
1335 } else if (el == 2) {
1336 /* HTCR */
1337 tsz = extract32(tcr, 0, 3);
1338 select = 0;
1339 hpd = extract64(tcr, 24, 1);
1340 epd = false;
1341 } else {
1342 int t0sz = extract32(tcr, 0, 3);
1343 int t1sz = extract32(tcr, 16, 3);
1345 if (t1sz == 0) {
1346 select = va > (0xffffffffu >> t0sz);
1347 } else {
1348 /* Note that we will detect errors later. */
1349 select = va >= ~(0xffffffffu >> t1sz);
1351 if (!select) {
1352 tsz = t0sz;
1353 epd = extract32(tcr, 7, 1);
1354 hpd = extract64(tcr, 41, 1);
1355 } else {
1356 tsz = t1sz;
1357 epd = extract32(tcr, 23, 1);
1358 hpd = extract64(tcr, 42, 1);
1360 /* For aarch32, hpd0 is not enabled without t2e as well. */
1361 hpd &= extract32(tcr, 6, 1);
1364 return (ARMVAParameters) {
1365 .tsz = tsz,
1366 .select = select,
1367 .epd = epd,
1368 .hpd = hpd,
1373 * check_s2_mmu_setup
1374 * @cpu: ARMCPU
1375 * @is_aa64: True if the translation regime is in AArch64 state
1376 * @tcr: VTCR_EL2 or VSTCR_EL2
1377 * @ds: Effective value of TCR.DS.
1378 * @iasize: Bitsize of IPAs
1379 * @stride: Page-table stride (See the ARM ARM)
1381 * Decode the starting level of the S2 lookup, returning INT_MIN if
1382 * the configuration is invalid.
1384 static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr,
1385 bool ds, int iasize, int stride)
1387 int sl0, sl2, startlevel, granulebits, levels;
1388 int s1_min_iasize, s1_max_iasize;
1390 sl0 = extract32(tcr, 6, 2);
1391 if (is_aa64) {
1393 * AArch64.S2InvalidSL: Interpretation of SL depends on the page size,
1394 * so interleave AArch64.S2StartLevel.
1396 switch (stride) {
1397 case 9: /* 4KB */
1398 /* SL2 is RES0 unless DS=1 & 4KB granule. */
1399 sl2 = extract64(tcr, 33, 1);
1400 if (ds && sl2) {
1401 if (sl0 != 0) {
1402 goto fail;
1404 startlevel = -1;
1405 } else {
1406 startlevel = 2 - sl0;
1407 switch (sl0) {
1408 case 2:
1409 if (arm_pamax(cpu) < 44) {
1410 goto fail;
1412 break;
1413 case 3:
1414 if (!cpu_isar_feature(aa64_st, cpu)) {
1415 goto fail;
1417 startlevel = 3;
1418 break;
1421 break;
1422 case 11: /* 16KB */
1423 switch (sl0) {
1424 case 2:
1425 if (arm_pamax(cpu) < 42) {
1426 goto fail;
1428 break;
1429 case 3:
1430 if (!ds) {
1431 goto fail;
1433 break;
1435 startlevel = 3 - sl0;
1436 break;
1437 case 13: /* 64KB */
1438 switch (sl0) {
1439 case 2:
1440 if (arm_pamax(cpu) < 44) {
1441 goto fail;
1443 break;
1444 case 3:
1445 goto fail;
1447 startlevel = 3 - sl0;
1448 break;
1449 default:
1450 g_assert_not_reached();
1452 } else {
1454 * Things are simpler for AArch32 EL2, with only 4k pages.
1455 * There is no separate S2InvalidSL function, but AArch32.S2Walk
1456 * begins with walkparms.sl0 in {'1x'}.
1458 assert(stride == 9);
1459 if (sl0 >= 2) {
1460 goto fail;
1462 startlevel = 2 - sl0;
1465 /* AArch{64,32}.S2InconsistentSL are functionally equivalent. */
1466 levels = 3 - startlevel;
1467 granulebits = stride + 3;
1469 s1_min_iasize = levels * stride + granulebits + 1;
1470 s1_max_iasize = s1_min_iasize + (stride - 1) + 4;
1472 if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) {
1473 return startlevel;
1476 fail:
1477 return INT_MIN;
1481 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
1483 * Returns false if the translation was successful. Otherwise, phys_ptr,
1484 * attrs, prot and page_size may not be filled in, and the populated fsr
1485 * value provides information on why the translation aborted, in the format
1486 * of a long-format DFSR/IFSR fault register, with the following caveat:
1487 * the WnR bit is never set (the caller must do this).
1489 * @env: CPUARMState
1490 * @ptw: Current and next stage parameters for the walk.
1491 * @address: virtual address to get physical address for
1492 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
1493 * @result: set on translation success,
1494 * @fi: set to fault info if the translation fails
1496 static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
1497 uint64_t address,
1498 MMUAccessType access_type,
1499 GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
1501 ARMCPU *cpu = env_archcpu(env);
1502 ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
1503 int32_t level;
1504 ARMVAParameters param;
1505 uint64_t ttbr;
1506 hwaddr descaddr, indexmask, indexmask_grainsize;
1507 uint32_t tableattrs;
1508 target_ulong page_size;
1509 uint64_t attrs;
1510 int32_t stride;
1511 int addrsize, inputsize, outputsize;
1512 uint64_t tcr = regime_tcr(env, mmu_idx);
1513 int ap, xn, pxn;
1514 uint32_t el = regime_el(env, mmu_idx);
1515 uint64_t descaddrmask;
1516 bool aarch64 = arm_el_is_aa64(env, el);
1517 uint64_t descriptor, new_descriptor;
1518 ARMSecuritySpace out_space;
1520 /* TODO: This code does not support shareability levels. */
1521 if (aarch64) {
1522 int ps;
1524 param = aa64_va_parameters(env, address, mmu_idx,
1525 access_type != MMU_INST_FETCH,
1526 !arm_el_is_aa64(env, 1));
1527 level = 0;
1530 * If TxSZ is programmed to a value larger than the maximum,
1531 * or smaller than the effective minimum, it is IMPLEMENTATION
1532 * DEFINED whether we behave as if the field were programmed
1533 * within bounds, or if a level 0 Translation fault is generated.
1535 * With FEAT_LVA, fault on less than minimum becomes required,
1536 * so our choice is to always raise the fault.
1538 if (param.tsz_oob) {
1539 goto do_translation_fault;
1542 addrsize = 64 - 8 * param.tbi;
1543 inputsize = 64 - param.tsz;
1546 * Bound PS by PARANGE to find the effective output address size.
1547 * ID_AA64MMFR0 is a read-only register so values outside of the
1548 * supported mappings can be considered an implementation error.
1550 ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
1551 ps = MIN(ps, param.ps);
1552 assert(ps < ARRAY_SIZE(pamax_map));
1553 outputsize = pamax_map[ps];
1556 * With LPA2, the effective output address (OA) size is at most 48 bits
1557 * unless TCR.DS == 1
1559 if (!param.ds && param.gran != Gran64K) {
1560 outputsize = MIN(outputsize, 48);
1562 } else {
1563 param = aa32_va_parameters(env, address, mmu_idx);
1564 level = 1;
1565 addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
1566 inputsize = addrsize - param.tsz;
1567 outputsize = 40;
1571 * We determined the region when collecting the parameters, but we
1572 * have not yet validated that the address is valid for the region.
1573 * Extract the top bits and verify that they all match select.
1575 * For aa32, if inputsize == addrsize, then we have selected the
1576 * region by exclusion in aa32_va_parameters and there is no more
1577 * validation to do here.
1579 if (inputsize < addrsize) {
1580 target_ulong top_bits = sextract64(address, inputsize,
1581 addrsize - inputsize);
1582 if (-top_bits != param.select) {
1583 /* The gap between the two regions is a Translation fault */
1584 goto do_translation_fault;
1588 stride = arm_granule_bits(param.gran) - 3;
1591 * Note that QEMU ignores shareability and cacheability attributes,
1592 * so we don't need to do anything with the SH, ORGN, IRGN fields
1593 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
1594 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
1595 * implement any ASID-like capability so we can ignore it (instead
1596 * we will always flush the TLB any time the ASID is changed).
1598 ttbr = regime_ttbr(env, mmu_idx, param.select);
1601 * Here we should have set up all the parameters for the translation:
1602 * inputsize, ttbr, epd, stride, tbi
1605 if (param.epd) {
1607 * Translation table walk disabled => Translation fault on TLB miss
1608 * Note: This is always 0 on 64-bit EL2 and EL3.
1610 goto do_translation_fault;
1613 if (!regime_is_stage2(mmu_idx)) {
1615 * The starting level depends on the virtual address size (which can
1616 * be up to 48 bits) and the translation granule size. It indicates
1617 * the number of strides (stride bits at a time) needed to
1618 * consume the bits of the input address. In the pseudocode this is:
1619 * level = 4 - RoundUp((inputsize - grainsize) / stride)
1620 * where their 'inputsize' is our 'inputsize', 'grainsize' is
1621 * our 'stride + 3' and 'stride' is our 'stride'.
1622 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
1623 * = 4 - (inputsize - stride - 3 + stride - 1) / stride
1624 * = 4 - (inputsize - 4) / stride;
1626 level = 4 - (inputsize - 4) / stride;
1627 } else {
1628 int startlevel = check_s2_mmu_setup(cpu, aarch64, tcr, param.ds,
1629 inputsize, stride);
1630 if (startlevel == INT_MIN) {
1631 level = 0;
1632 goto do_translation_fault;
1634 level = startlevel;
1637 indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
1638 indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));
1640 /* Now we can extract the actual base address from the TTBR */
1641 descaddr = extract64(ttbr, 0, 48);
1644 * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
1646 * Otherwise, if the base address is out of range, raise AddressSizeFault.
1647 * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
1648 * but we've just cleared the bits above 47, so simplify the test.
1650 if (outputsize > 48) {
1651 descaddr |= extract64(ttbr, 2, 4) << 48;
1652 } else if (descaddr >> outputsize) {
1653 level = 0;
1654 fi->type = ARMFault_AddressSize;
1655 goto do_fault;
1659 * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
1660 * and also to mask out CnP (bit 0) which could validly be non-zero.
1662 descaddr &= ~indexmask;
1665 * For AArch32, the address field in the descriptor goes up to bit 39
1666 * for both v7 and v8. However, for v8 the SBZ bits [47:40] must be 0
1667 * or an AddressSize fault is raised. So for v8 we extract those SBZ
1668 * bits as part of the address, which will be checked via outputsize.
1669 * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
1670 * the highest bits of a 52-bit output are placed elsewhere.
1672 if (param.ds) {
1673 descaddrmask = MAKE_64BIT_MASK(0, 50);
1674 } else if (arm_feature(env, ARM_FEATURE_V8)) {
1675 descaddrmask = MAKE_64BIT_MASK(0, 48);
1676 } else {
1677 descaddrmask = MAKE_64BIT_MASK(0, 40);
1679 descaddrmask &= ~indexmask_grainsize;
1680 tableattrs = 0;
1682 next_level:
1683 descaddr |= (address >> (stride * (4 - level))) & indexmask;
1684 descaddr &= ~7ULL;
1687 * Process the NSTable bit from the previous level. This changes
1688 * the table address space and the output space from Secure to
1689 * NonSecure. With RME, the EL3 translation regime does not change
1690 * from Root to NonSecure.
1692 if (ptw->in_space == ARMSS_Secure
1693 && !regime_is_stage2(mmu_idx)
1694 && extract32(tableattrs, 4, 1)) {
1696 * Stage2_S -> Stage2 or Phys_S -> Phys_NS
1697 * Assert the relative order of the secure/non-secure indexes.
1699 QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS);
1700 QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2);
1701 ptw->in_ptw_idx += 1;
1702 ptw->in_secure = false;
1703 ptw->in_space = ARMSS_NonSecure;
1706 if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
1707 goto do_fault;
1709 descriptor = arm_ldq_ptw(env, ptw, fi);
1710 if (fi->type != ARMFault_None) {
1711 goto do_fault;
1713 new_descriptor = descriptor;
1715 restart_atomic_update:
1716 if (!(descriptor & 1) || (!(descriptor & 2) && (level == 3))) {
1717 /* Invalid, or the Reserved level 3 encoding */
1718 goto do_translation_fault;
1721 descaddr = descriptor & descaddrmask;
1724 * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
1725 * of descriptor. For FEAT_LPA2 and effective DS, bits [51:50] of
1726 * descaddr are in [9:8]. Otherwise, if descaddr is out of range,
1727 * raise AddressSizeFault.
1729 if (outputsize > 48) {
1730 if (param.ds) {
1731 descaddr |= extract64(descriptor, 8, 2) << 50;
1732 } else {
1733 descaddr |= extract64(descriptor, 12, 4) << 48;
1735 } else if (descaddr >> outputsize) {
1736 fi->type = ARMFault_AddressSize;
1737 goto do_fault;
1740 if ((descriptor & 2) && (level < 3)) {
1742 * Table entry. The top five bits are attributes which may
1743 * propagate down through lower levels of the table (and
1744 * which are all arranged so that 0 means "no effect", so
1745 * we can gather them up by ORing in the bits at each level).
1747 tableattrs |= extract64(descriptor, 59, 5);
1748 level++;
1749 indexmask = indexmask_grainsize;
1750 goto next_level;
1754 * Block entry at level 1 or 2, or page entry at level 3.
1755 * These are basically the same thing, although the number
1756 * of bits we pull in from the vaddr varies. Note that although
1757 * descaddrmask masks enough of the low bits of the descriptor
1758 * to give a correct page or table address, the address field
1759 * in a block descriptor is smaller; so we need to explicitly
1760 * clear the lower bits here before ORing in the low vaddr bits.
1762 * Afterward, descaddr is the final physical address.
1764 page_size = (1ULL << ((stride * (4 - level)) + 3));
1765 descaddr &= ~(hwaddr)(page_size - 1);
1766 descaddr |= (address & (page_size - 1));
1768 if (likely(!ptw->in_debug)) {
1770 * Access flag.
1771 * If HA is enabled, prepare to update the descriptor below.
1772 * Otherwise, pass the access fault on to software.
1774 if (!(descriptor & (1 << 10))) {
1775 if (param.ha) {
1776 new_descriptor |= 1 << 10; /* AF */
1777 } else {
1778 fi->type = ARMFault_AccessFlag;
1779 goto do_fault;
1784 * Dirty Bit.
1785 * If HD is enabled, pre-emptively set/clear the appropriate AP/S2AP
1786 * bit for writeback. The actual write protection test may still be
1787 * overridden by tableattrs, to be merged below.
1789 if (param.hd
1790 && extract64(descriptor, 51, 1) /* DBM */
1791 && access_type == MMU_DATA_STORE) {
1792 if (regime_is_stage2(mmu_idx)) {
1793 new_descriptor |= 1ull << 7; /* set S2AP[1] */
1794 } else {
1795 new_descriptor &= ~(1ull << 7); /* clear AP[2] */
1801 * Extract attributes from the (modified) descriptor, and apply
1802 * table descriptors. Stage 2 table descriptors do not include
1803 * any attribute fields. HPD disables all the table attributes
1804 * except NSTable.
1806 attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
1807 if (!regime_is_stage2(mmu_idx)) {
1808 attrs |= !ptw->in_secure << 5; /* NS */
1809 if (!param.hpd) {
1810 attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */
1812 * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
1813 * means "force PL1 access only", which means forcing AP[1] to 0.
1815 attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */
1816 attrs |= extract32(tableattrs, 3, 1) << 7; /* APT[1] => AP[2] */
1820 ap = extract32(attrs, 6, 2);
1821 out_space = ptw->in_space;
1822 if (regime_is_stage2(mmu_idx)) {
1824 * R_GYNXY: For stage2 in Realm security state, bit 55 is NS.
1825 * The bit remains ignored for other security states.
1826 * R_YMCSL: Executing an insn fetched from non-Realm causes
1827 * a stage2 permission fault.
1829 if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) {
1830 out_space = ARMSS_NonSecure;
1831 result->f.prot = get_S2prot_noexecute(ap);
1832 } else {
1833 xn = extract64(attrs, 53, 2);
1834 result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
1836 } else {
1837 int nse, ns = extract32(attrs, 5, 1);
1838 switch (out_space) {
1839 case ARMSS_Root:
1841 * R_GVZML: Bit 11 becomes the NSE field in the EL3 regime.
1842 * R_XTYPW: NSE and NS together select the output pa space.
1844 nse = extract32(attrs, 11, 1);
1845 out_space = (nse << 1) | ns;
1846 if (out_space == ARMSS_Secure &&
1847 !cpu_isar_feature(aa64_sel2, cpu)) {
1848 out_space = ARMSS_NonSecure;
1850 break;
1851 case ARMSS_Secure:
1852 if (ns) {
1853 out_space = ARMSS_NonSecure;
1855 break;
1856 case ARMSS_Realm:
1857 switch (mmu_idx) {
1858 case ARMMMUIdx_Stage1_E0:
1859 case ARMMMUIdx_Stage1_E1:
1860 case ARMMMUIdx_Stage1_E1_PAN:
1861 /* I_CZPRF: For Realm EL1&0 stage1, NS bit is RES0. */
1862 break;
1863 case ARMMMUIdx_E2:
1864 case ARMMMUIdx_E20_0:
1865 case ARMMMUIdx_E20_2:
1866 case ARMMMUIdx_E20_2_PAN:
1868 * R_LYKFZ, R_WGRZN: For Realm EL2 and EL2&1,
1869 * NS changes the output to non-secure space.
1871 if (ns) {
1872 out_space = ARMSS_NonSecure;
1874 break;
1875 default:
1876 g_assert_not_reached();
1878 break;
1879 case ARMSS_NonSecure:
1880 /* R_QRMFF: For NonSecure state, the NS bit is RES0. */
1881 break;
1882 default:
1883 g_assert_not_reached();
1885 xn = extract64(attrs, 54, 1);
1886 pxn = extract64(attrs, 53, 1);
1889 * Note that we modified ptw->in_space earlier for NSTable, but
1890 * result->f.attrs retains a copy of the original security space.
1892 result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn,
1893 result->f.attrs.space, out_space);
1896 if (!(result->f.prot & (1 << access_type))) {
1897 fi->type = ARMFault_Permission;
1898 goto do_fault;
1901 /* If FEAT_HAFDBS has made changes, update the PTE. */
1902 if (new_descriptor != descriptor) {
1903 new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi);
1904 if (fi->type != ARMFault_None) {
1905 goto do_fault;
1908 * I_YZSVV says that if the in-memory descriptor has changed,
1909 * then we must use the information in that new value
1910 * (which might include a different output address, different
1911 * attributes, or generate a fault).
1912 * Restart the handling of the descriptor value from scratch.
1914 if (new_descriptor != descriptor) {
1915 descriptor = new_descriptor;
1916 goto restart_atomic_update;
1920 result->f.attrs.space = out_space;
1921 result->f.attrs.secure = arm_space_is_secure(out_space);
1923 if (regime_is_stage2(mmu_idx)) {
1924 result->cacheattrs.is_s2_format = true;
1925 result->cacheattrs.attrs = extract32(attrs, 2, 4);
1926 } else {
1927 /* Index into MAIR registers for cache attributes */
1928 uint8_t attrindx = extract32(attrs, 2, 3);
1929 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
1930 assert(attrindx <= 7);
1931 result->cacheattrs.is_s2_format = false;
1932 result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
1934 /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
1935 if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
1936 result->f.guarded = extract64(attrs, 50, 1); /* GP */
1941 * For FEAT_LPA2 and effective DS, the SH field in the attributes
1942 * was re-purposed for output address bits. The SH attribute in
1943 * that case comes from TCR_ELx, which we extracted earlier.
1945 if (param.ds) {
1946 result->cacheattrs.shareability = param.sh;
1947 } else {
1948 result->cacheattrs.shareability = extract32(attrs, 8, 2);
1951 result->f.phys_addr = descaddr;
1952 result->f.lg_page_size = ctz64(page_size);
1953 return false;
1955 do_translation_fault:
1956 fi->type = ARMFault_Translation;
1957 do_fault:
1958 fi->level = level;
1959 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
1960 fi->stage2 = fi->s1ptw || regime_is_stage2(mmu_idx);
1961 fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
1962 return true;
1965 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
1966 MMUAccessType access_type, ARMMMUIdx mmu_idx,
1967 bool is_secure, GetPhysAddrResult *result,
1968 ARMMMUFaultInfo *fi)
1970 int n;
1971 uint32_t mask;
1972 uint32_t base;
1973 bool is_user = regime_is_user(env, mmu_idx);
1975 if (regime_translation_disabled(env, mmu_idx, is_secure)) {
1976 /* MPU disabled. */
1977 result->f.phys_addr = address;
1978 result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1979 return false;
1982 result->f.phys_addr = address;
1983 for (n = 7; n >= 0; n--) {
1984 base = env->cp15.c6_region[n];
1985 if ((base & 1) == 0) {
1986 continue;
1988 mask = 1 << ((base >> 1) & 0x1f);
1989 /* Keep this shift separate from the above to avoid an
1990 (undefined) << 32. */
1991 mask = (mask << 1) - 1;
1992 if (((base ^ address) & ~mask) == 0) {
1993 break;
1996 if (n < 0) {
1997 fi->type = ARMFault_Background;
1998 return true;
2001 if (access_type == MMU_INST_FETCH) {
2002 mask = env->cp15.pmsav5_insn_ap;
2003 } else {
2004 mask = env->cp15.pmsav5_data_ap;
2006 mask = (mask >> (n * 4)) & 0xf;
2007 switch (mask) {
2008 case 0:
2009 fi->type = ARMFault_Permission;
2010 fi->level = 1;
2011 return true;
2012 case 1:
2013 if (is_user) {
2014 fi->type = ARMFault_Permission;
2015 fi->level = 1;
2016 return true;
2018 result->f.prot = PAGE_READ | PAGE_WRITE;
2019 break;
2020 case 2:
2021 result->f.prot = PAGE_READ;
2022 if (!is_user) {
2023 result->f.prot |= PAGE_WRITE;
2025 break;
2026 case 3:
2027 result->f.prot = PAGE_READ | PAGE_WRITE;
2028 break;
2029 case 5:
2030 if (is_user) {
2031 fi->type = ARMFault_Permission;
2032 fi->level = 1;
2033 return true;
2035 result->f.prot = PAGE_READ;
2036 break;
2037 case 6:
2038 result->f.prot = PAGE_READ;
2039 break;
2040 default:
2041 /* Bad permission. */
2042 fi->type = ARMFault_Permission;
2043 fi->level = 1;
2044 return true;
2046 result->f.prot |= PAGE_EXEC;
2047 return false;
2050 static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
2051 int32_t address, uint8_t *prot)
2053 if (!arm_feature(env, ARM_FEATURE_M)) {
2054 *prot = PAGE_READ | PAGE_WRITE;
2055 switch (address) {
2056 case 0xF0000000 ... 0xFFFFFFFF:
2057 if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
2058 /* hivecs execing is ok */
2059 *prot |= PAGE_EXEC;
2061 break;
2062 case 0x00000000 ... 0x7FFFFFFF:
2063 *prot |= PAGE_EXEC;
2064 break;
2066 } else {
2067 /* Default system address map for M profile cores.
2068 * The architecture specifies which regions are execute-never;
2069 * at the MPU level no other checks are defined.
2071 switch (address) {
2072 case 0x00000000 ... 0x1fffffff: /* ROM */
2073 case 0x20000000 ... 0x3fffffff: /* SRAM */
2074 case 0x60000000 ... 0x7fffffff: /* RAM */
2075 case 0x80000000 ... 0x9fffffff: /* RAM */
2076 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
2077 break;
2078 case 0x40000000 ... 0x5fffffff: /* Peripheral */
2079 case 0xa0000000 ... 0xbfffffff: /* Device */
2080 case 0xc0000000 ... 0xdfffffff: /* Device */
2081 case 0xe0000000 ... 0xffffffff: /* System */
2082 *prot = PAGE_READ | PAGE_WRITE;
2083 break;
2084 default:
2085 g_assert_not_reached();
2090 static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
2092 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
2093 return arm_feature(env, ARM_FEATURE_M) &&
2094 extract32(address, 20, 12) == 0xe00;
2097 static bool m_is_system_region(CPUARMState *env, uint32_t address)
2100 * True if address is in the M profile system region
2101 * 0xe0000000 - 0xffffffff
2103 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
2106 static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
2107 bool is_secure, bool is_user)
2110 * Return true if we should use the default memory map as a
2111 * "background" region if there are no hits against any MPU regions.
2113 CPUARMState *env = &cpu->env;
2115 if (is_user) {
2116 return false;
2119 if (arm_feature(env, ARM_FEATURE_M)) {
2120 return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
2123 if (mmu_idx == ARMMMUIdx_Stage2) {
2124 return false;
2127 return regime_sctlr(env, mmu_idx) & SCTLR_BR;
2130 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
2131 MMUAccessType access_type, ARMMMUIdx mmu_idx,
2132 bool secure, GetPhysAddrResult *result,
2133 ARMMMUFaultInfo *fi)
2135 ARMCPU *cpu = env_archcpu(env);
2136 int n;
2137 bool is_user = regime_is_user(env, mmu_idx);
2139 result->f.phys_addr = address;
2140 result->f.lg_page_size = TARGET_PAGE_BITS;
2141 result->f.prot = 0;
2143 if (regime_translation_disabled(env, mmu_idx, secure) ||
2144 m_is_ppb_region(env, address)) {
2146 * MPU disabled or M profile PPB access: use default memory map.
2147 * The other case which uses the default memory map in the
2148 * v7M ARM ARM pseudocode is exception vector reads from the vector
2149 * table. In QEMU those accesses are done in arm_v7m_load_vector(),
2150 * which always does a direct read using address_space_ldl(), rather
2151 * than going via this function, so we don't need to check that here.
2153 get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
2154 } else { /* MPU enabled */
2155 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
2156 /* region search */
2157 uint32_t base = env->pmsav7.drbar[n];
2158 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
2159 uint32_t rmask;
2160 bool srdis = false;
2162 if (!(env->pmsav7.drsr[n] & 0x1)) {
2163 continue;
2166 if (!rsize) {
2167 qemu_log_mask(LOG_GUEST_ERROR,
2168 "DRSR[%d]: Rsize field cannot be 0\n", n);
2169 continue;
2171 rsize++;
2172 rmask = (1ull << rsize) - 1;
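/*
 * Illustrative example: a DRSR Rsize field of 11 gives rsize = 12
 * here, i.e. a 4KB region with rmask = 0xfff.
 */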
2174 if (base & rmask) {
2175 qemu_log_mask(LOG_GUEST_ERROR,
2176 "DRBAR[%d]: 0x%" PRIx32 " misaligned "
2177 "to DRSR region size, mask = 0x%" PRIx32 "\n",
2178 n, base, rmask);
2179 continue;
2182 if (address < base || address > base + rmask) {
2184 * Address not in this region. We must check whether the
2185 * region covers addresses in the same page as our address.
2186 * In that case we must not report a size that covers the
2187 * whole page for a subsequent hit against a different MPU
2188 * region or the background region, because it would result in
2189 * incorrect TLB hits for subsequent accesses to addresses that
2190 * are in this MPU region.
2192 if (ranges_overlap(base, rmask,
2193 address & TARGET_PAGE_MASK,
2194 TARGET_PAGE_SIZE)) {
2195 result->f.lg_page_size = 0;
2197 continue;
2200 /* Region matched */
2202 if (rsize >= 8) { /* no subregions for regions < 256 bytes */
2203 int i, snd;
2204 uint32_t srdis_mask;
2206 rsize -= 3; /* sub region size (power of 2) */
2207 snd = ((address - base) >> rsize) & 0x7;
2208 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
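/*
 * Illustrative example: for a 4KB region (rsize 12), rsize becomes 9
 * after the subtraction, so each of the 8 subregions is 512 bytes;
 * an address 0x700 into the region gives snd = 3, whose disable bit
 * is DRSR bit 11 (snd + 8).
 */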
2210 srdis_mask = srdis ? 0x3 : 0x0;
2211 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
2213 * This checks, in groups of 2, 4 and then 8, whether the
2214 * subregion disable bits are consistent. rsize is incremented
2215 * back up to give the effective region size, treating
2216 * consistent adjacent subregions as one region. Stop testing
2217 * once rsize is already big enough for an entire QEMU page.
2219 int snd_rounded = snd & ~(i - 1);
2220 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
2221 snd_rounded + 8, i);
2222 if (srdis_mask ^ srdis_multi) {
2223 break;
2225 srdis_mask = (srdis_mask << i) | srdis_mask;
2226 rsize++;
2229 if (srdis) {
2230 continue;
2232 if (rsize < TARGET_PAGE_BITS) {
2233 result->f.lg_page_size = rsize;
2235 break;
2238 if (n == -1) { /* no hits */
2239 if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
2240 /* background fault */
2241 fi->type = ARMFault_Background;
2242 return true;
2244 get_phys_addr_pmsav7_default(env, mmu_idx, address,
2245 &result->f.prot);
2246 } else { /* a MPU hit! */
2247 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
2248 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
2250 if (m_is_system_region(env, address)) {
2251 /* System space is always execute never */
2252 xn = 1;
2255 if (is_user) { /* User mode AP bit decoding */
2256 switch (ap) {
2257 case 0:
2258 case 1:
2259 case 5:
2260 break; /* no access */
2261 case 3:
2262 result->f.prot |= PAGE_WRITE;
2263 /* fall through */
2264 case 2:
2265 case 6:
2266 result->f.prot |= PAGE_READ | PAGE_EXEC;
2267 break;
2268 case 7:
2269 /* for v7M, same as 6; for R profile a reserved value */
2270 if (arm_feature(env, ARM_FEATURE_M)) {
2271 result->f.prot |= PAGE_READ | PAGE_EXEC;
2272 break;
2274 /* fall through */
2275 default:
2276 qemu_log_mask(LOG_GUEST_ERROR,
2277 "DRACR[%d]: Bad value for AP bits: 0x%"
2278 PRIx32 "\n", n, ap);
2280 } else { /* Priv. mode AP bits decoding */
2281 switch (ap) {
2282 case 0:
2283 break; /* no access */
2284 case 1:
2285 case 2:
2286 case 3:
2287 result->f.prot |= PAGE_WRITE;
2288 /* fall through */
2289 case 5:
2290 case 6:
2291 result->f.prot |= PAGE_READ | PAGE_EXEC;
2292 break;
2293 case 7:
2294 /* for v7M, same as 6; for R profile a reserved value */
2295 if (arm_feature(env, ARM_FEATURE_M)) {
2296 result->f.prot |= PAGE_READ | PAGE_EXEC;
2297 break;
2299 /* fall through */
2300 default:
2301 qemu_log_mask(LOG_GUEST_ERROR,
2302 "DRACR[%d]: Bad value for AP bits: 0x%"
2303 PRIx32 "\n", n, ap);
2307 /* execute never */
2308 if (xn) {
2309 result->f.prot &= ~PAGE_EXEC;
2314 fi->type = ARMFault_Permission;
2315 fi->level = 1;
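/*
 * Note: the return expression below relies on MMU_DATA_LOAD,
 * MMU_DATA_STORE and MMU_INST_FETCH being 0, 1 and 2, so that
 * (1 << access_type) lines up with PAGE_READ, PAGE_WRITE and
 * PAGE_EXEC.
 */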
2316 return !(result->f.prot & (1 << access_type));
2319 static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx,
2320 uint32_t secure)
2322 if (regime_el(env, mmu_idx) == 2) {
2323 return env->pmsav8.hprbar;
2324 } else {
2325 return env->pmsav8.rbar[secure];
2329 static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx,
2330 uint32_t secure)
2332 if (regime_el(env, mmu_idx) == 2) {
2333 return env->pmsav8.hprlar;
2334 } else {
2335 return env->pmsav8.rlar[secure];
2339 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
2340 MMUAccessType access_type, ARMMMUIdx mmu_idx,
2341 bool secure, GetPhysAddrResult *result,
2342 ARMMMUFaultInfo *fi, uint32_t *mregion)
2345 * Perform a PMSAv8 MPU lookup (without also doing the SAU check
2346 * that a full phys-to-virt translation does).
2347 * mregion is (if not NULL) set to the region number which matched,
2348 * or -1 if no region number is returned (MPU off, address did not
2349 * hit a region, address hit in multiple regions).
2350 * If the matching region doesn't cover the entire TARGET_PAGE that the
2351 * address is within, then we set the result lg_page_size to 0 to force
2352 * the memory system to use a subpage.
2354 ARMCPU *cpu = env_archcpu(env);
2355 bool is_user = regime_is_user(env, mmu_idx);
2356 int n;
2357 int matchregion = -1;
2358 bool hit = false;
2359 uint32_t addr_page_base = address & TARGET_PAGE_MASK;
2360 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
2361 int region_counter;
2363 if (regime_el(env, mmu_idx) == 2) {
2364 region_counter = cpu->pmsav8r_hdregion;
2365 } else {
2366 region_counter = cpu->pmsav7_dregion;
2369 result->f.lg_page_size = TARGET_PAGE_BITS;
2370 result->f.phys_addr = address;
2371 result->f.prot = 0;
2372 if (mregion) {
2373 *mregion = -1;
2376 if (mmu_idx == ARMMMUIdx_Stage2) {
2377 fi->stage2 = true;
2381 * Unlike the ARM ARM pseudocode, we don't need to check whether this
2382 * was an exception vector read from the vector table (which is always
2383 * done using the default system address map), because those accesses
2384 * are done in arm_v7m_load_vector(), which always does a direct
2385 * read using address_space_ldl(), rather than going via this function.
2387 if (regime_translation_disabled(env, mmu_idx, secure)) { /* MPU disabled */
2388 hit = true;
2389 } else if (m_is_ppb_region(env, address)) {
2390 hit = true;
2391 } else {
2392 if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
2393 hit = true;
2396 uint32_t bitmask;
2397 if (arm_feature(env, ARM_FEATURE_M)) {
2398 bitmask = 0x1f;
2399 } else {
2400 bitmask = 0x3f;
2401 fi->level = 0;
2404 for (n = region_counter - 1; n >= 0; n--) {
2405 /* region search */
2407 * Note that the base address is bits [31:x] from the register
2408 * with bits [x-1:0] all zeroes, while the limit address is bits
2409 * [31:x] from the register with bits [x-1:0] all ones, where x is
2410 * 5 for Cortex-M and 6 for Cortex-R.
2412 uint32_t base = regime_rbar(env, mmu_idx, secure)[n] & ~bitmask;
2413 uint32_t limit = regime_rlar(env, mmu_idx, secure)[n] | bitmask;
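/*
 * Illustrative example (M profile, bitmask 0x1f): RBAR 0x20000000
 * and RLAR 0x2000ffe1 describe an enabled region covering
 * 0x20000000..0x2000ffff.
 */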
2415 if (!(regime_rlar(env, mmu_idx, secure)[n] & 0x1)) {
2416 /* Region disabled */
2417 continue;
2420 if (address < base || address > limit) {
2422 * Address not in this region. We must check whether the
2423 * region covers addresses in the same page as our address.
2424 * In that case we must not report a size that covers the
2425 * whole page for a subsequent hit against a different MPU
2426 * region or the background region, because it would result in
2427 * incorrect TLB hits for subsequent accesses to addresses that
2428 * are in this MPU region.
2430 if (limit >= base &&
2431 ranges_overlap(base, limit - base + 1,
2432 addr_page_base,
2433 TARGET_PAGE_SIZE)) {
2434 result->f.lg_page_size = 0;
2436 continue;
2439 if (base > addr_page_base || limit < addr_page_limit) {
2440 result->f.lg_page_size = 0;
2443 if (matchregion != -1) {
2445 * Multiple regions match -- always a failure (unlike
2446 * PMSAv7 where highest-numbered-region wins)
2448 fi->type = ARMFault_Permission;
2449 if (arm_feature(env, ARM_FEATURE_M)) {
2450 fi->level = 1;
2452 return true;
2455 matchregion = n;
2456 hit = true;
2460 if (!hit) {
2461 if (arm_feature(env, ARM_FEATURE_M)) {
2462 fi->type = ARMFault_Background;
2463 } else {
2464 fi->type = ARMFault_Permission;
2466 return true;
2469 if (matchregion == -1) {
2470 /* hit using the background region */
2471 get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
2472 } else {
2473 uint32_t matched_rbar = regime_rbar(env, mmu_idx, secure)[matchregion];
2474 uint32_t matched_rlar = regime_rlar(env, mmu_idx, secure)[matchregion];
2475 uint32_t ap = extract32(matched_rbar, 1, 2);
2476 uint32_t xn = extract32(matched_rbar, 0, 1);
2477 bool pxn = false;
2479 if (arm_feature(env, ARM_FEATURE_V8_1M)) {
2480 pxn = extract32(matched_rlar, 4, 1);
2483 if (m_is_system_region(env, address)) {
2484 /* System space is always execute never */
2485 xn = 1;
2488 if (regime_el(env, mmu_idx) == 2) {
2489 result->f.prot = simple_ap_to_rw_prot_is_user(ap,
2490 mmu_idx != ARMMMUIdx_E2);
2491 } else {
2492 result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
2495 if (!arm_feature(env, ARM_FEATURE_M)) {
2496 uint8_t attrindx = extract32(matched_rlar, 1, 3);
2497 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
2498 uint8_t sh = extract32(matched_rlar, 3, 2);
2500 if (regime_sctlr(env, mmu_idx) & SCTLR_WXN &&
2501 result->f.prot & PAGE_WRITE && mmu_idx != ARMMMUIdx_Stage2) {
2502 xn = 0x1;
2505 if ((regime_el(env, mmu_idx) == 1) &&
2506 regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) {
2507 pxn = 0x1;
2510 result->cacheattrs.is_s2_format = false;
2511 result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
2512 result->cacheattrs.shareability = sh;
2515 if (result->f.prot && !xn && !(pxn && !is_user)) {
2516 result->f.prot |= PAGE_EXEC;
2519 if (mregion) {
2520 *mregion = matchregion;
2524 fi->type = ARMFault_Permission;
2525 if (arm_feature(env, ARM_FEATURE_M)) {
2526 fi->level = 1;
2528 return !(result->f.prot & (1 << access_type));
2531 static bool v8m_is_sau_exempt(CPUARMState *env,
2532 uint32_t address, MMUAccessType access_type)
2535 * The architecture specifies that certain address ranges are
2536 * exempt from v8M SAU/IDAU checks.
2538 return
2539 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
2540 (address >= 0xe0000000 && address <= 0xe0002fff) ||
2541 (address >= 0xe000e000 && address <= 0xe000efff) ||
2542 (address >= 0xe002e000 && address <= 0xe002efff) ||
2543 (address >= 0xe0040000 && address <= 0xe0041fff) ||
2544 (address >= 0xe00ff000 && address <= 0xe00fffff);
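/*
 * In the standard v8-M memory map these ranges correspond to the PPB
 * debug components (ITM/DWT/FPB), the System Control Space and its
 * Non-secure alias, the TPIU/ETM window and the ROM table, all of
 * which the architecture keeps SAU-exempt.
 */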
2547 void v8m_security_lookup(CPUARMState *env, uint32_t address,
2548 MMUAccessType access_type, ARMMMUIdx mmu_idx,
2549 bool is_secure, V8M_SAttributes *sattrs)
2552 * Look up the security attributes for this address. Compare the
2553 * pseudocode SecurityCheck() function.
2554 * We assume the caller has zero-initialized *sattrs.
2556 ARMCPU *cpu = env_archcpu(env);
2557 int r;
2558 bool idau_exempt = false, idau_ns = true, idau_nsc = true;
2559 int idau_region = IREGION_NOTVALID;
2560 uint32_t addr_page_base = address & TARGET_PAGE_MASK;
2561 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
2563 if (cpu->idau) {
2564 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
2565 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
2567 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
2568 &idau_nsc);
2571 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
2572 /* 0xf0000000..0xffffffff is always S for insn fetches */
2573 return;
2576 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
2577 sattrs->ns = !is_secure;
2578 return;
2581 if (idau_region != IREGION_NOTVALID) {
2582 sattrs->irvalid = true;
2583 sattrs->iregion = idau_region;
2586 switch (env->sau.ctrl & 3) {
2587 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
2588 break;
2589 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
2590 sattrs->ns = true;
2591 break;
2592 default: /* SAU.ENABLE == 1 */
2593 for (r = 0; r < cpu->sau_sregion; r++) {
2594 if (env->sau.rlar[r] & 1) {
2595 uint32_t base = env->sau.rbar[r] & ~0x1f;
2596 uint32_t limit = env->sau.rlar[r] | 0x1f;
2598 if (base <= address && limit >= address) {
2599 if (base > addr_page_base || limit < addr_page_limit) {
2600 sattrs->subpage = true;
2602 if (sattrs->srvalid) {
2604 * If we hit in more than one region then we must report
2605 * as Secure, not NS-Callable, with no valid region
2606 * number info.
2608 sattrs->ns = false;
2609 sattrs->nsc = false;
2610 sattrs->sregion = 0;
2611 sattrs->srvalid = false;
2612 break;
2613 } else {
2614 if (env->sau.rlar[r] & 2) {
2615 sattrs->nsc = true;
2616 } else {
2617 sattrs->ns = true;
2619 sattrs->srvalid = true;
2620 sattrs->sregion = r;
2622 } else {
2624 * Address not in this region. We must check whether the
2625 * region covers addresses in the same page as our address.
2626 * In that case we must not report a size that covers the
2627 * whole page for a subsequent hit against a different MPU
2628 * region or the background region, because it would result
2629 * in incorrect TLB hits for subsequent accesses to
2630 * addresses that are in this MPU region.
2632 if (limit >= base &&
2633 ranges_overlap(base, limit - base + 1,
2634 addr_page_base,
2635 TARGET_PAGE_SIZE)) {
2636 sattrs->subpage = true;
2641 break;
2645 * The IDAU will override the SAU lookup results if it specifies
2646 * higher security than the SAU does.
2648 if (!idau_ns) {
2649 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
2650 sattrs->ns = false;
2651 sattrs->nsc = idau_nsc;
2656 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
2657 MMUAccessType access_type, ARMMMUIdx mmu_idx,
2658 bool secure, GetPhysAddrResult *result,
2659 ARMMMUFaultInfo *fi)
2661 V8M_SAttributes sattrs = {};
2662 bool ret;
2664 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2665 v8m_security_lookup(env, address, access_type, mmu_idx,
2666 secure, &sattrs);
2667 if (access_type == MMU_INST_FETCH) {
2669 * Instruction fetches always use the MMU bank and the
2670 * transaction attribute determined by the fetch address,
2671 * regardless of CPU state. This is painful for QEMU
2672 * to handle, because it would mean we need to encode
2673 * into the mmu_idx not just the (user, negpri) information
2674 * for the current security state but also that for the
2675 * other security state, which would balloon the number
2676 * of mmu_idx values needed alarmingly.
2677 * Fortunately we can avoid this because it's not actually
2678 * possible to arbitrarily execute code from memory with
2679 * the wrong security attribute: it will always generate
2680 * an exception of some kind or another, apart from the
2681 * special case of an NS CPU executing an SG instruction
2682 * in S&NSC memory. So we always just fail the translation
2683 * here and sort things out in the exception handler
2684 * (including possibly emulating an SG instruction).
2686 if (sattrs.ns != !secure) {
2687 if (sattrs.nsc) {
2688 fi->type = ARMFault_QEMU_NSCExec;
2689 } else {
2690 fi->type = ARMFault_QEMU_SFault;
2692 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
2693 result->f.phys_addr = address;
2694 result->f.prot = 0;
2695 return true;
2697 } else {
2699 * For data accesses we always use the MMU bank indicated
2700 * by the current CPU state, but the security attributes
2701 * might downgrade a secure access to nonsecure.
2703 if (sattrs.ns) {
2704 result->f.attrs.secure = false;
2705 result->f.attrs.space = ARMSS_NonSecure;
2706 } else if (!secure) {
2708 * NS access to S memory must fault.
2709 * Architecturally we should first check whether the
2710 * MPU information for this address indicates that we
2711 * are doing an unaligned access to Device memory, which
2712 * should generate a UsageFault instead. QEMU does not
2713 * currently check for that kind of unaligned access though.
2714 * If we added it we would need to do so as a special case
2715 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
2717 fi->type = ARMFault_QEMU_SFault;
2718 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
2719 result->f.phys_addr = address;
2720 result->f.prot = 0;
2721 return true;
2726 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
2727 result, fi, NULL);
2728 if (sattrs.subpage) {
2729 result->f.lg_page_size = 0;
2731 return ret;
2735 * Translate from the 4-bit stage 2 representation of
2736 * memory attributes (without cache-allocation hints) to
2737 * the 8-bit representation of the stage 1 MAIR registers
2738 * (which includes allocation hints).
2740 * ref: shared/translation/attrs/S2AttrDecode()
2741 * .../S2ConvertAttrsHints()
2743 static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs)
2745 uint8_t hiattr = extract32(s2attrs, 2, 2);
2746 uint8_t loattr = extract32(s2attrs, 0, 2);
2747 uint8_t hihint = 0, lohint = 0;
2749 if (hiattr != 0) { /* normal memory */
2750 if (hcr & HCR_CD) { /* cache disabled */
2751 hiattr = loattr = 1; /* non-cacheable */
2752 } else {
2753 if (hiattr != 1) { /* Write-through or write-back */
2754 hihint = 3; /* RW allocate */
2756 if (loattr != 1) { /* Write-through or write-back */
2757 lohint = 3; /* RW allocate */
2762 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
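/*
 * Illustrative examples: S2 attrs 0xf (Normal, Outer/Inner Write-Back)
 * with caches enabled yields 0xff (WB, R/W allocate); 0x5 (Normal
 * Non-cacheable) yields 0x44.
 */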
2766 * Combine either inner or outer cacheability attributes for normal
2767 * memory, according to table D4-42 and pseudocode procedure
2768 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
2770 * NB: only stage 1 includes allocation hints (RW bits), leading to
2771 * some asymmetry.
2773 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
2775 if (s1 == 4 || s2 == 4) {
2776 /* non-cacheable has precedence */
2777 return 4;
2778 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
2779 /* stage 1 write-through takes precedence */
2780 return s1;
2781 } else if (extract32(s2, 2, 2) == 2) {
2782 /* stage 2 write-through takes precedence, but the allocation hint
2783 * is still taken from stage 1
2785 return (2 << 2) | extract32(s1, 0, 2);
2786 } else { /* write-back */
2787 return s1;
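/*
 * Illustrative example: s1 = 0xf (WB, R/W allocate) combined with
 * s2 = 0xa (WT, R allocate) gives 0xb: stage 2's Write-Through wins,
 * but stage 1's R/W-allocate hints are kept.
 */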
2792 * Combine the memory type and cacheability attributes of
2793 * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
2794 * combined attributes in MAIR_EL1 format.
2796 static uint8_t combined_attrs_nofwb(uint64_t hcr,
2797 ARMCacheAttrs s1, ARMCacheAttrs s2)
2799 uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
2801 if (s2.is_s2_format) {
2802 s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
2803 } else {
2804 s2_mair_attrs = s2.attrs;
2807 s1lo = extract32(s1.attrs, 0, 4);
2808 s2lo = extract32(s2_mair_attrs, 0, 4);
2809 s1hi = extract32(s1.attrs, 4, 4);
2810 s2hi = extract32(s2_mair_attrs, 4, 4);
2812 /* Combine memory type and cacheability attributes */
2813 if (s1hi == 0 || s2hi == 0) {
2814 /* Device has precedence over normal */
2815 if (s1lo == 0 || s2lo == 0) {
2816 /* nGnRnE has precedence over anything */
2817 ret_attrs = 0;
2818 } else if (s1lo == 4 || s2lo == 4) {
2819 /* non-Reordering has precedence over Reordering */
2820 ret_attrs = 4; /* nGnRE */
2821 } else if (s1lo == 8 || s2lo == 8) {
2822 /* non-Gathering has precedence over Gathering */
2823 ret_attrs = 8; /* nGRE */
2824 } else {
2825 ret_attrs = 0xc; /* GRE */
2827 } else { /* Normal memory */
2828 /* Outer/inner cacheability combine independently */
2829 ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
2830 | combine_cacheattr_nibble(s1lo, s2lo);
2832 return ret_attrs;
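/*
 * Illustrative example: s1 = 0xff (Normal WB) with an s2 MAIR value
 * of 0x04 (Device-nGnRE) combines to 0x04, since Device always wins
 * over Normal.
 */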
2835 static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
2838 * Given the 4 bits specifying the outer or inner cacheability
2839 * in MAIR format, return a value specifying Normal Write-Back,
2840 * with the allocation and transient hints taken from the input
2841 * if the input specified some kind of cacheable attribute.
2843 if (attr == 0 || attr == 4) {
2845 * 0 == an UNPREDICTABLE encoding
2846 * 4 == Non-cacheable
2847 * Either way, force Write-Back RW allocate non-transient
2849 return 0xf;
2851 /* Change WriteThrough to WriteBack, keep allocation and transient hints */
2852 return attr | 4;
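/* Illustrative example: 0xa (WT, R allocate) becomes 0xe (WB, R allocate). */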
2856 * Combine the memory type and cacheability attributes of
2857 * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
2858 * combined attributes in MAIR_EL1 format.
2860 static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
2862 assert(s2.is_s2_format && !s1.is_s2_format);
2864 switch (s2.attrs) {
2865 case 7:
2866 /* Use stage 1 attributes */
2867 return s1.attrs;
2868 case 6:
2870 * Force Normal Write-Back. Note that if S1 is Normal cacheable
2871 * then we take the allocation hints from it; otherwise it is
2872 * RW allocate, non-transient.
2874 if ((s1.attrs & 0xf0) == 0) {
2875 /* S1 is Device */
2876 return 0xff;
2878 /* Need to check the Inner and Outer nibbles separately */
2879 return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
2880 force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
2881 case 5:
2882 /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
2883 if ((s1.attrs & 0xf0) == 0) {
2884 return s1.attrs;
2886 return 0x44;
2887 case 0 ... 3:
2888 /* Force Device, of subtype specified by S2 */
2889 return s2.attrs << 2;
2890 default:
2892 * RESERVED values (including RES0 descriptor bit [5] being nonzero);
2893 * arbitrarily force Device.
2895 return 0;
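/*
 * Illustrative examples: with FWB, s2.attrs == 6 forces Write-Back,
 * so an s1 of 0xaa (Normal WT, R allocate) becomes 0xee; s2.attrs in
 * 0..3 forces Device, e.g. 1 becomes 0x04 (Device-nGnRE).
 */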
2900 * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
2901 * and CombineS1S2Desc()
2903 * @env: CPUARMState
2904 * @s1: Attributes from stage 1 walk
2905 * @s2: Attributes from stage 2 walk
2907 static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
2908 ARMCacheAttrs s1, ARMCacheAttrs s2)
2910 ARMCacheAttrs ret;
2911 bool tagged = false;
2913 assert(!s1.is_s2_format);
2914 ret.is_s2_format = false;
2915 ret.guarded = s1.guarded;
2917 if (s1.attrs == 0xf0) {
2918 tagged = true;
2919 s1.attrs = 0xff;
2922 /* Combine shareability attributes (table D4-43) */
2923 if (s1.shareability == 2 || s2.shareability == 2) {
2924 /* if either are outer-shareable, the result is outer-shareable */
2925 ret.shareability = 2;
2926 } else if (s1.shareability == 3 || s2.shareability == 3) {
2927 /* if either are inner-shareable, the result is inner-shareable */
2928 ret.shareability = 3;
2929 } else {
2930 /* both non-shareable */
2931 ret.shareability = 0;
2934 /* Combine memory type and cacheability attributes */
2935 if (hcr & HCR_FWB) {
2936 ret.attrs = combined_attrs_fwb(s1, s2);
2937 } else {
2938 ret.attrs = combined_attrs_nofwb(hcr, s1, s2);
2942 * Any location for which the resultant memory type is any
2943 * type of Device memory is always treated as Outer Shareable.
2944 * Any location for which the resultant memory type is Normal
2945 * Inner Non-cacheable, Outer Non-cacheable is always treated
2946 * as Outer Shareable.
2947 * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
2949 if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
2950 ret.shareability = 2;
2953 /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
2954 if (tagged && ret.attrs == 0xff) {
2955 ret.attrs = 0xf0;
2958 return ret;
2962 * MMU disabled. S1 addresses within aa64 translation regimes are
2963 * still checked for bounds -- see AArch64.S1DisabledOutput().
2965 static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
2966 MMUAccessType access_type,
2967 ARMMMUIdx mmu_idx, bool is_secure,
2968 GetPhysAddrResult *result,
2969 ARMMMUFaultInfo *fi)
2971 uint8_t memattr = 0x00; /* Device nGnRnE */
2972 uint8_t shareability = 0; /* non-shareable */
2973 int r_el;
2975 switch (mmu_idx) {
2976 case ARMMMUIdx_Stage2:
2977 case ARMMMUIdx_Stage2_S:
2978 case ARMMMUIdx_Phys_S:
2979 case ARMMMUIdx_Phys_NS:
2980 case ARMMMUIdx_Phys_Root:
2981 case ARMMMUIdx_Phys_Realm:
2982 break;
2984 default:
2985 r_el = regime_el(env, mmu_idx);
2986 if (arm_el_is_aa64(env, r_el)) {
2987 int pamax = arm_pamax(env_archcpu(env));
2988 uint64_t tcr = env->cp15.tcr_el[r_el];
2989 int addrtop, tbi;
2991 tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
2992 if (access_type == MMU_INST_FETCH) {
2993 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
2995 tbi = (tbi >> extract64(address, 55, 1)) & 1;
2996 addrtop = (tbi ? 55 : 63);
2998 if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
2999 fi->type = ARMFault_AddressSize;
3000 fi->level = 0;
3001 fi->stage2 = false;
3002 return true;
3006 * When TBI is disabled, we've just validated that all of the
3007 * bits above PAMax are zero, so logically we only need to
3008 * clear the top byte for TBI. But it's clearer to follow the
3009 * way the pseudocode sets addrdesc.paddress.
3011 address = extract64(address, 0, 52);
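/*
 * Illustrative example: with PAMax = 40 and TBI disabled for this
 * address, any nonzero bit in [63:40] takes the Address Size fault
 * above; with TBI enabled only bits [55:40] are checked.
 */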
3014 /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
3015 if (r_el == 1) {
3016 uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
3017 if (hcr & HCR_DC) {
3018 if (hcr & HCR_DCT) {
3019 memattr = 0xf0; /* Tagged, Normal, WB, RWA */
3020 } else {
3021 memattr = 0xff; /* Normal, WB, RWA */
3025 if (memattr == 0 && access_type == MMU_INST_FETCH) {
3026 if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
3027 memattr = 0xee; /* Normal, WT, RA, NT */
3028 } else {
3029 memattr = 0x44; /* Normal, NC, No */
3031 shareability = 2; /* outer shareable */
3033 result->cacheattrs.is_s2_format = false;
3034 break;
3037 result->f.phys_addr = address;
3038 result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
3039 result->f.lg_page_size = TARGET_PAGE_BITS;
3040 result->cacheattrs.shareability = shareability;
3041 result->cacheattrs.attrs = memattr;
3042 return false;
3045 static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
3046 target_ulong address,
3047 MMUAccessType access_type,
3048 GetPhysAddrResult *result,
3049 ARMMMUFaultInfo *fi)
3051 hwaddr ipa;
3052 int s1_prot, s1_lgpgsz;
3053 bool is_secure = ptw->in_secure;
3054 bool ret, ipa_secure;
3055 ARMCacheAttrs cacheattrs1;
3056 ARMSecuritySpace ipa_space;
3057 uint64_t hcr;
3059 ret = get_phys_addr_nogpc(env, ptw, address, access_type, result, fi);
3061 /* If S1 fails, return early. */
3062 if (ret) {
3063 return ret;
3066 ipa = result->f.phys_addr;
3067 ipa_secure = result->f.attrs.secure;
3068 ipa_space = result->f.attrs.space;
3070 ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
3071 ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
3072 ptw->in_secure = ipa_secure;
3073 ptw->in_space = ipa_space;
3074 ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx);
3077 * S1 is done, now do S2 translation.
3078 * Save the stage1 results so that we may merge prot and cacheattrs later.
3080 s1_prot = result->f.prot;
3081 s1_lgpgsz = result->f.lg_page_size;
3082 cacheattrs1 = result->cacheattrs;
3083 memset(result, 0, sizeof(*result));
3085 ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, result, fi);
3086 fi->s2addr = ipa;
3088 /* Combine the S1 and S2 perms. */
3089 result->f.prot &= s1_prot;
3091 /* If S2 fails, return early. */
3092 if (ret) {
3093 return ret;
3097 * If either S1 or S2 returned a result smaller than TARGET_PAGE_SIZE,
3098 * this means "don't put this in the TLB"; in this case, return a
3099 * result with lg_page_size == 0 to achieve that. Otherwise,
3100 * use the maximum of the S1 & S2 page size, so that invalidation
3101 * of pages > TARGET_PAGE_SIZE works correctly. (This works even though
3102 * we know the combined result permissions etc only cover the minimum
3103 * of the S1 and S2 page size, because we know that the common TLB code
3104 * never actually creates TLB entries bigger than TARGET_PAGE_SIZE,
3105 * and passing a larger page size value only affects invalidations.)
3107 if (result->f.lg_page_size < TARGET_PAGE_BITS ||
3108 s1_lgpgsz < TARGET_PAGE_BITS) {
3109 result->f.lg_page_size = 0;
3110 } else if (result->f.lg_page_size < s1_lgpgsz) {
3111 result->f.lg_page_size = s1_lgpgsz;
3114 /* Combine the S1 and S2 cache attributes. */
3115 hcr = arm_hcr_el2_eff_secstate(env, is_secure);
3116 if (hcr & HCR_DC) {
3118 * HCR.DC forces the first stage attributes to
3119 * Normal Non-Shareable,
3120 * Inner Write-Back Read-Allocate Write-Allocate,
3121 * Outer Write-Back Read-Allocate Write-Allocate.
3122 * Do not overwrite Tagged within attrs.
3124 if (cacheattrs1.attrs != 0xf0) {
3125 cacheattrs1.attrs = 0xff;
3127 cacheattrs1.shareability = 0;
3129 result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
3130 result->cacheattrs);
3133 * Check if IPA translates to secure or non-secure PA space.
3134 * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
3136 result->f.attrs.secure =
3137 (is_secure
3138 && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
3139 && (ipa_secure
3140 || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
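/*
 * Concretely (illustrative): a Secure IPA stays Secure as long as
 * VSTCR.{SA,SW} are clear; a NonSecure IPA only maps to the Secure
 * PA space if VTCR.{NSA,NSW} are also clear.
 */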
3142 return false;
3145 static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
3146 target_ulong address,
3147 MMUAccessType access_type,
3148 GetPhysAddrResult *result,
3149 ARMMMUFaultInfo *fi)
3151 ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
3152 bool is_secure = ptw->in_secure;
3153 ARMMMUIdx s1_mmu_idx;
3156 * The page table entries may downgrade Secure to NonSecure, but
3157 * cannot upgrade a NonSecure translation regime's attributes
3158 * to Secure or Realm.
3160 result->f.attrs.secure = is_secure;
3161 result->f.attrs.space = ptw->in_space;
3163 switch (mmu_idx) {
3164 case ARMMMUIdx_Phys_S:
3165 case ARMMMUIdx_Phys_NS:
3166 case ARMMMUIdx_Phys_Root:
3167 case ARMMMUIdx_Phys_Realm:
3168 /* Checking Phys early avoids special casing later vs regime_el. */
3169 return get_phys_addr_disabled(env, address, access_type, mmu_idx,
3170 is_secure, result, fi);
3172 case ARMMMUIdx_Stage1_E0:
3173 case ARMMMUIdx_Stage1_E1:
3174 case ARMMMUIdx_Stage1_E1_PAN:
3175 /* First stage lookup uses second stage for ptw. */
3176 ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
3177 break;
3179 case ARMMMUIdx_Stage2:
3180 case ARMMMUIdx_Stage2_S:
3182 * Second stage lookup uses physical for ptw; whether this is S or
3183 * NS may depend on the SW/NSW bits if this is a stage 2 lookup for
3184 * the Secure EL1&0 regime.
3186 ptw->in_ptw_idx = ptw_idx_for_stage_2(env, mmu_idx);
3187 break;
3189 case ARMMMUIdx_E10_0:
3190 s1_mmu_idx = ARMMMUIdx_Stage1_E0;
3191 goto do_twostage;
3192 case ARMMMUIdx_E10_1:
3193 s1_mmu_idx = ARMMMUIdx_Stage1_E1;
3194 goto do_twostage;
3195 case ARMMMUIdx_E10_1_PAN:
3196 s1_mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
3197 do_twostage:
3199 * Call ourselves recursively to do the stage 1 and then stage 2
3200 * translations if mmu_idx is a two-stage regime, and EL2 present.
3201 * Otherwise, a stage1+stage2 translation is just stage 1.
3203 ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
3204 if (arm_feature(env, ARM_FEATURE_EL2) &&
3205 !regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) {
3206 return get_phys_addr_twostage(env, ptw, address, access_type,
3207 result, fi);
3209 /* fall through */
3211 default:
3212 /* Single stage uses physical for ptw. */
3213 ptw->in_ptw_idx = arm_space_to_phys(ptw->in_space);
3214 break;
3217 result->f.attrs.user = regime_is_user(env, mmu_idx);
3220 * Fast Context Switch Extension. This doesn't exist at all in v8.
3221 * In v7 and earlier it affects all stage 1 translations.
3223 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
3224 && !arm_feature(env, ARM_FEATURE_V8)) {
3225 if (regime_el(env, mmu_idx) == 3) {
3226 address += env->cp15.fcseidr_s;
3227 } else {
3228 address += env->cp15.fcseidr_ns;
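/*
 * Illustrative example: with FCSEIDR.PID = 3 (register value
 * 0x06000000, PID in bits [31:25]), a VA of 0x00001000 is translated
 * as modified VA 0x06001000 before the table walk.
 */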
3232 if (arm_feature(env, ARM_FEATURE_PMSA)) {
3233 bool ret;
3234 result->f.lg_page_size = TARGET_PAGE_BITS;
3236 if (arm_feature(env, ARM_FEATURE_V8)) {
3237 /* PMSAv8 */
3238 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
3239 is_secure, result, fi);
3240 } else if (arm_feature(env, ARM_FEATURE_V7)) {
3241 /* PMSAv7 */
3242 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
3243 is_secure, result, fi);
3244 } else {
3245 /* Pre-v7 MPU */
3246 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
3247 is_secure, result, fi);
3249 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
3250 " mmu_idx %u -> %s (prot %c%c%c)\n",
3251 access_type == MMU_DATA_LOAD ? "reading" :
3252 (access_type == MMU_DATA_STORE ? "writing" : "executing"),
3253 (uint32_t)address, mmu_idx,
3254 ret ? "Miss" : "Hit",
3255 result->f.prot & PAGE_READ ? 'r' : '-',
3256 result->f.prot & PAGE_WRITE ? 'w' : '-',
3257 result->f.prot & PAGE_EXEC ? 'x' : '-');
3259 return ret;
3262 /* Definitely a real MMU, not an MPU */
3264 if (regime_translation_disabled(env, mmu_idx, is_secure)) {
3265 return get_phys_addr_disabled(env, address, access_type, mmu_idx,
3266 is_secure, result, fi);
3269 if (regime_using_lpae_format(env, mmu_idx)) {
3270 return get_phys_addr_lpae(env, ptw, address, access_type, result, fi);
3271 } else if (arm_feature(env, ARM_FEATURE_V7) ||
3272 regime_sctlr(env, mmu_idx) & SCTLR_XP) {
3273 return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
3274 } else {
3275 return get_phys_addr_v5(env, ptw, address, access_type, result, fi);
3279 static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
3280 target_ulong address,
3281 MMUAccessType access_type,
3282 GetPhysAddrResult *result,
3283 ARMMMUFaultInfo *fi)
3285 if (get_phys_addr_nogpc(env, ptw, address, access_type, result, fi)) {
3286 return true;
3288 if (!granule_protection_check(env, result->f.phys_addr,
3289 result->f.attrs.space, fi)) {
3290 fi->type = ARMFault_GPCFOnOutput;
3291 return true;
3293 return false;
3296 bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
3297 MMUAccessType access_type, ARMMMUIdx mmu_idx,
3298 bool is_secure, GetPhysAddrResult *result,
3299 ARMMMUFaultInfo *fi)
3301 S1Translate ptw = {
3302 .in_mmu_idx = mmu_idx,
3303 .in_secure = is_secure,
3304 .in_space = arm_secure_to_space(is_secure),
3306 return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
3309 bool get_phys_addr(CPUARMState *env, target_ulong address,
3310 MMUAccessType access_type, ARMMMUIdx mmu_idx,
3311 GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
3313 S1Translate ptw = {
3314 .in_mmu_idx = mmu_idx,
3316 ARMSecuritySpace ss;
3318 switch (mmu_idx) {
3319 case ARMMMUIdx_E10_0:
3320 case ARMMMUIdx_E10_1:
3321 case ARMMMUIdx_E10_1_PAN:
3322 case ARMMMUIdx_E20_0:
3323 case ARMMMUIdx_E20_2:
3324 case ARMMMUIdx_E20_2_PAN:
3325 case ARMMMUIdx_Stage1_E0:
3326 case ARMMMUIdx_Stage1_E1:
3327 case ARMMMUIdx_Stage1_E1_PAN:
3328 case ARMMMUIdx_E2:
3329 ss = arm_security_space_below_el3(env);
3330 break;
3331 case ARMMMUIdx_Stage2:
3333 * For Secure EL2, we need this index to be NonSecure;
3334 * otherwise this will already be NonSecure or Realm.
3336 ss = arm_security_space_below_el3(env);
3337 if (ss == ARMSS_Secure) {
3338 ss = ARMSS_NonSecure;
3340 break;
3341 case ARMMMUIdx_Phys_NS:
3342 case ARMMMUIdx_MPrivNegPri:
3343 case ARMMMUIdx_MUserNegPri:
3344 case ARMMMUIdx_MPriv:
3345 case ARMMMUIdx_MUser:
3346 ss = ARMSS_NonSecure;
3347 break;
3348 case ARMMMUIdx_Stage2_S:
3349 case ARMMMUIdx_Phys_S:
3350 case ARMMMUIdx_MSPrivNegPri:
3351 case ARMMMUIdx_MSUserNegPri:
3352 case ARMMMUIdx_MSPriv:
3353 case ARMMMUIdx_MSUser:
3354 ss = ARMSS_Secure;
3355 break;
3356 case ARMMMUIdx_E3:
3357 if (arm_feature(env, ARM_FEATURE_AARCH64) &&
3358 cpu_isar_feature(aa64_rme, env_archcpu(env))) {
3359 ss = ARMSS_Root;
3360 } else {
3361 ss = ARMSS_Secure;
3363 break;
3364 case ARMMMUIdx_Phys_Root:
3365 ss = ARMSS_Root;
3366 break;
3367 case ARMMMUIdx_Phys_Realm:
3368 ss = ARMSS_Realm;
3369 break;
3370 default:
3371 g_assert_not_reached();
3374 ptw.in_space = ss;
3375 ptw.in_secure = arm_space_is_secure(ss);
3376 return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
3379 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
3380 MemTxAttrs *attrs)
3382 ARMCPU *cpu = ARM_CPU(cs);
3383 CPUARMState *env = &cpu->env;
3384 ARMMMUIdx mmu_idx = arm_mmu_idx(env);
3385 ARMSecuritySpace ss = arm_security_space(env);
3386 S1Translate ptw = {
3387 .in_mmu_idx = mmu_idx,
3388 .in_space = ss,
3389 .in_secure = arm_space_is_secure(ss),
3390 .in_debug = true,
3392 GetPhysAddrResult res = {};
3393 ARMMMUFaultInfo fi = {};
3394 bool ret;
3396 ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
3397 *attrs = res.f.attrs;
3399 if (ret) {
3400 return -1;
3402 return res.f.phys_addr;