target/hppa/mem_helper.c
/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qom/cpu.h"

#ifdef CONFIG_USER_ONLY
int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int size, int rw, int mmu_idx)
{
    HPPACPU *cpu = HPPA_CPU(cs);

    /* ??? Test between data page fault and data memory protection trap,
       which would affect si_code.  */
    cs->exception_index = EXCP_DMP;
    cpu->env.cr[CR_IOR] = address;
    return 1;
}
#else
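
/* env->tlb[] models the architectural (software-loaded) TLB of the
   guest; QEMU's own soft-MMU TLB is maintained separately via the
   tlb_set_page and tlb_flush* calls below.  */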
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            return ent;
        }
    }
    return NULL;
}

static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
{
    CPUState *cs = CPU(hppa_env_get_cpu(env));
    unsigned i, n = 1 << (2 * ent->page_size);
    uint64_t addr = ent->va_b;
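
    /* The entry spans 1 << (2 * page_size) == 4^page_size base pages,
       so flush each TARGET_PAGE_SIZE page it covers.  */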
    for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
        /* Do not flush MMU_PHYS_IDX.  */
        tlb_flush_page_by_mmuidx(cs, addr, 0xf);
    }

    memset(ent, 0, sizeof(*ent));
    ent->va_b = -1;
}
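
/* Claim the next architectural TLB slot round-robin (via env->tlb_last),
   flushing whatever translation it previously held.  */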
static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    hppa_tlb_entry *ent;
    uint32_t i = env->tlb_last;

    env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
    ent = &env->tlb[i];

    hppa_flush_tlb_ent(env, ent);
    return ent;
}

int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot;
    hppa_tlb_entry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        /* ??? Unconditionally report data tlb miss,
           even if this is an instruction fetch.  */
        ret = EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);

    /* Map TLB access_rights field to QEMU protection.  */
    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
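    /* Here mmu_idx doubles as the current privilege level, 0 being the
       most privileged.  E.g. with ar_pl1 == 1 and ar_pl2 == 0, the page
       is readable at levels 0-1, writable only at level 0, and
       executable only while running at levels 0-1.  */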
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }
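
    /* For ar_type 4-7 (gateway pages) only execute permission is granted
       here; the privilege promotion itself is performed by the branch
       code, which queries the page type via hppa_artype_for_page().  */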

    /* ??? Check PSW_P and ent->access_prot.  This can remove PAGE_WRITE.  */

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC ? EXCP_IMP : EXCP_DMP);
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
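    /* For example: the first store to a clean page faults with
       EXCP_TLB_DIRTY; the guest OS sets the D bit and re-inserts the
       entry.  Because PAGE_WRITE is removed below, QEMU's TLB cannot
       satisfy the retried store directly, so we come back through here
       and observe d == 1.  */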
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType type, int mmu_idx, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx,
                                     a_prot, &phys, &prot);
    if (unlikely(excp >= 0)) {
        /* Failure.  Raise the indicated exception.  */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    /* Success!  Store the translation into the QEMU TLB.  */
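    /* Only this one TARGET_PAGE_SIZE page is entered, even if the
       architectural entry covers a superpage; neighbouring pages will
       simply refault and be filled the same way.  */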
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way.  */
    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (!ent->entry_valid) {
            empty = ent;
        } else if (ent->va_b <= addr && addr <= ent->va_e) {
            hppa_flush_tlb_ent(env, ent);
            empty = ent;
        }
    }

    /* If we didn't see an empty entry, evict one.  */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already.  */
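    /* The physical page number occupies bits [24:5] of the operand
       (20 bits, shifted up to a page address below).  */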
    empty->va_b = addr & TARGET_PAGE_MASK;
    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (unlikely(ent == NULL || ent->entry_valid)) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }
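
    /* Layout of the protection word, matching the extracts below:
       bits [18:1] access_id, [19] U, [21:20] ar_pl2, [23:22] ar_pl1,
       [26:24] ar_type, [27] B, [28] D, [29] T.  */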
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
}

/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
   synchronous across all processors.  */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (ent && ent->entry_valid) {
        hppa_flush_tlb_ent(env, ent);
    }
}

void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));
    CPUState *cpu;
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);
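
    /* Queue the purge on all other cpus, then run it on this cpu as
       "safe" work, i.e. with every cpu out of its execution loop, so
       the purge completes before this cpu executes another insn.  */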
    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));

    memset(env->tlb, 0, sizeof(env->tlb));
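    /* As above, do not flush MMU_PHYS_IDX.  */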
    tlb_flush_by_mmuidx(src, 0xf);
}
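
/* Implement LPA (Load Physical Address): translate ADDR as a data
   reference and return the physical address, raising a non-access
   TLB miss exception on failure.  */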
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        hppa_dynamic_excp(env, excp, GETPC());
    }
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
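/* Used by the B,GATE translation code to implement gateway-page
   privilege promotion.  */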
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}
#endif /* CONFIG_USER_ONLY */