/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

/* Return true if the translation regime is using LPAE format page tables */
bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/*
 * Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);
    return regime_using_lpae_format(env, mmu_idx);
}
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el, 0,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}
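
/*
 * Compute the FSR value to report for the fault described by @fi when it
 * is taken to @target_el, and store the corresponding syndrome fault
 * status code in *ret_fsc. The long-descriptor (LPAE) format is used when
 * the target EL is 2 or AArch64, or when the stage 1 regime uses LPAE
 * page tables; otherwise the short-descriptor format is used.
 */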
static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
                                int target_el, int mmu_idx, uint32_t *ret_fsc)
{
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
    uint32_t fsr, fsc;

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /*
         * LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    *ret_fsc = fsc;
    return fsr;
}
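
/*
 * Deliver the fault described by @fi for the access to @addr: choose the
 * target EL, record HPFAR_EL2 for stage 2 faults, build the FSR and the
 * syndrome, and raise either a prefetch abort (instruction fetches) or a
 * data abort (loads and stores).
 */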
void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                       MMUAccessType access_type,
                       int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }
    same_el = (arm_current_el(env) == target_el);

    fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}
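
/*
 * Called from generated code when the PC itself is misaligned: report a
 * prefetch abort with the pcalignment syndrome.
 */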
void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
    int target_el = exception_target_el(env);
    int mmu_idx = cpu_mmu_index(env, true);
    uint32_t fsc;

    env->exception.vaddress = pc;

    /*
     * Note that the fsc is not applicable to this exception,
     * since any syndrome is pcalignment not insn_abort.
     */
    env->exception.fsr = compute_fsr_fsc(env, &fi, target_el, mmu_idx, &fsc);
    raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);
}

#if !defined(CONFIG_USER_ONLY)

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
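
/*
 * TLB fill handler: translate @address for @access_type and @mmu_idx and,
 * if the mapping exists, install the page in the QEMU TLB and return
 * true. On failure, return false when probing, otherwise deliver the
 * fault to the guest.
 */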
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo local_fi, *fi;
    int ret;

    /*
     * Allow S1_ptw_translate to see any fault generated here.
     * Since this may recurse, read and clear.
     */
    fi = cpu->env.tlb_fi;
    if (fi) {
        cpu->env.tlb_fi = NULL;
    } else {
        fi = memset(&local_fi, 0, sizeof(local_fi));
    }

    /*
     * Walk the page table and (if the mapping exists) add the page
     * to the TLB. On success, return true. Otherwise, if probing,
     * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
     * register format, and signal the fault.
     */
    ret = get_phys_addr(&cpu->env, address, access_type,
                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                        &res, fi);
    if (likely(!ret)) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (res.f.lg_page_size >= TARGET_PAGE_BITS) {
            res.f.phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }

        res.f.pte_attrs = res.cacheattrs.attrs;
        res.f.shareability = res.cacheattrs.shareability;

        tlb_set_page_full(cs, mmu_idx, address, &res.f);
        return true;
    } else if (probe) {
        return false;
    } else {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
        arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
    }
}
#else
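
/*
 * user-only: report a host SIGSEGV for a guest access as a translation
 * or permission fault, delivered through the normal fault path so that
 * the guest signal handler is given both an ESR and a FAR.
 */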
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra)
{
    ARMMMUFaultInfo fi = {
        .type = maperr ? ARMFault_Translation : ARMFault_Permission,
        .level = 3,
    };
    ARMCPU *cpu = ARM_CPU(cs);

    /*
     * We report both ESR and FAR to signal handlers.
     * For now, it's easiest to deliver the fault normally.
     */
    cpu_restore_state(cs, ra);
    arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
}
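
/*
 * user-only: a host SIGBUS corresponds to an unaligned guest access, so
 * reuse the alignment fault path above.
 */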
void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra)
{
    arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
}
#endif /* !defined(CONFIG_USER_ONLY) */