/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"

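/*
 * Build the syndrome for a data abort by merging the template syndrome
 * recorded at translation time (which carries IL and the ISV-specific
 * fields: SAS, SSE, SRT, SF, AR) with the fault details that are only
 * known at run time (fault status code, routing and stage-1/2 info).
 */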
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el, 0,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }

    return syn;
}

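/*
 * Route the MMU fault described by @fi to the appropriate exception
 * level, filling in the guest-visible FSR and syndrome state before
 * raising the exception; this function never returns.
 */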
static void QEMU_NORETURN arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                                            MMUAccessType access_type,
                                            int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    target_el = exception_target_el(env);
    if (fi->stage2) {
        /* Stage-2 faults are always taken to EL2; record the faulting IPA. */
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }

    same_el = (arm_current_el(env) == target_el);

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /*
         * LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            /* Set the WnR bit (bit 11) of the short-format FSR on writes. */
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

#if !defined(CONFIG_USER_ONLY)
/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
#endif /* !defined(CONFIG_USER_ONLY) */

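/*
 * TCG TLB-fill hook: translate @address for @access_type and, if a
 * valid mapping exists, install it in the softmmu TLB and return true.
 * When @probe is set, a failed translation returns false instead of
 * raising the fault to the guest.
 */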
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

#ifdef CONFIG_USER_ONLY
    int flags = page_get_flags(useronly_clean_ptr(address));
    if (flags & PAGE_VALID) {
        /* The page is mapped but the access is not permitted. */
        fi.type = ARMFault_Permission;
    } else {
        fi.type = ARMFault_Translation;
    }

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);
    arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
#else
    hwaddr phys_addr;
    target_ulong page_size;
    int prot, ret;
    MemTxAttrs attrs = {};
    ARMCacheAttrs cacheattrs = {};

    /*
     * Walk the page table and (if the mapping exists) add the page
     * to the TLB. On success, return true. Otherwise, if probing,
     * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
     * register format, and signal the fault.
     */
    ret = get_phys_addr(&cpu->env, address, access_type,
                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                        &phys_addr, &attrs, &prot, &page_size,
                        &fi, &cacheattrs);
    if (likely(!ret)) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (page_size >= TARGET_PAGE_SIZE) {
            phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }
        /* Notice and record tagged memory. */
        if (cpu_isar_feature(aa64_mte, cpu) && cacheattrs.attrs == 0xf0) {
            arm_tlb_mte_tagged(&attrs) = true;
        }

        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr, true);
        arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
    }
#endif
}