target/arm/tlb_helper.c
/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

/* Return true if the translation regime is using LPAE format page tables */
bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);

    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/*
 * Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);
    return regime_using_lpae_format(env, mmu_idx);
}

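/*
 * Combine the translation-time syndrome template with the runtime fault
 * details to produce the final data abort syndrome value.
 */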
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el, 0,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}

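/*
 * Compute the FSR value to report for a fault, using the LPAE
 * (long-descriptor) or short-descriptor format as appropriate for the
 * target EL, and return the 6-bit fault status code via *ret_fsc for
 * use in the syndrome.
 */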
static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
                                int target_el, int mmu_idx, uint32_t *ret_fsc)
{
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
    uint32_t fsr, fsc;

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /*
         * LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    *ret_fsc = fsc;
    return fsr;
}

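/*
 * Deliver a fault to the guest: fill in the exception syndrome, FSR and
 * faulting virtual address, then raise a prefetch or data abort at the
 * appropriate target EL. This function does not return.
 */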
static G_NORETURN
void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                       MMUAccessType access_type,
                       int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }
    same_el = (arm_current_el(env) == target_el);

    fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
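            /* Set bit 11 (WnR) to indicate the abort was caused by a write */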
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

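/* Raise a PC alignment fault for a misaligned program counter */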
void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
    int target_el = exception_target_el(env);
    int mmu_idx = cpu_mmu_index(env, true);
    uint32_t fsc;

    env->exception.vaddress = pc;

    /*
     * Note that the fsc is not applicable to this exception,
     * since any syndrome is pcalignment not insn_abort.
     */
    env->exception.fsr = compute_fsr_fsc(env, &fi, target_el, mmu_idx, &fsc);
    raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);
}

#if !defined(CONFIG_USER_ONLY)

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

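/*
 * Handle a softmmu TLB miss: translate the guest virtual address and, on
 * success, install the mapping in the TLB; otherwise return false if this
 * was only a probe, or deliver the fault to the guest.
 */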
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo local_fi, *fi;
    int ret;

    /*
     * Allow S1_ptw_translate to see any fault generated here.
     * Since this may recurse, read and clear.
     */
    fi = cpu->env.tlb_fi;
    if (fi) {
        cpu->env.tlb_fi = NULL;
    } else {
        fi = memset(&local_fi, 0, sizeof(local_fi));
    }

    /*
     * Walk the page table and (if the mapping exists) add the page
     * to the TLB. On success, return true. Otherwise, if probing,
     * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
     * register format, and signal the fault.
     */
    ret = get_phys_addr(&cpu->env, address, access_type,
                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                        &res, fi);
    if (likely(!ret)) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (res.f.lg_page_size >= TARGET_PAGE_BITS) {
            res.f.phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }

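        /* Carry the cacheability and shareability attributes into the TLB entry */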
        res.f.pte_attrs = res.cacheattrs.attrs;
        res.f.shareability = res.cacheattrs.shareability;

        tlb_set_page_full(cs, mmu_idx, address, &res.f);
        return true;
    } else if (probe) {
        return false;
    } else {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
        arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
    }
}
#else
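/*
 * User-only mode: record a host SIGSEGV on a guest access as a translation
 * or permission fault and deliver it via the normal fault path.
 */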
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra)
{
    ARMMMUFaultInfo fi = {
        .type = maperr ? ARMFault_Translation : ARMFault_Permission,
        .level = 3,
    };
    ARMCPU *cpu = ARM_CPU(cs);

    /*
     * We report both ESR and FAR to signal handlers.
     * For now, it's easiest to deliver the fault normally.
     */
    cpu_restore_state(cs, ra);
    arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
}

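/* User-only mode: report a host SIGBUS on a guest access as an alignment fault */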
void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra)
{
    arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
}
#endif /* !defined(CONFIG_USER_ONLY) */