/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qom/cpu.h"
#include "trace.h"

#ifdef CONFIG_USER_ONLY
int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int size, int rw, int mmu_idx)
{
    HPPACPU *cpu = HPPA_CPU(cs);

    /* ??? Test between data page fault and data memory protection trap,
       which would affect si_code.  */
    cs->exception_index = EXCP_DMP;
    cpu->env.cr[CR_IOR] = address;
    return 1;
}
#else
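/* Look up ADDR in the architectural (software) TLB: a linear scan of
   env->tlb for the first entry whose virtual range covers the address.
   The entry is returned even if it is not marked valid; callers check
   entry_valid themselves.  Returns NULL if nothing matches.  */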
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            trace_hppa_tlb_find_entry(env, ent + i, ent->entry_valid,
                                      ent->va_b, ent->va_e, ent->pa);
            return ent;
        }
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

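/* Invalidate one software TLB entry: flush every QEMU TLB page it covers
   for the virtual mmu indices (mask 0xf; MMU_PHYS_IDX is deliberately left
   alone), then zero the entry and park va_b at -1 so that it can no longer
   match any address.  */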
static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
{
    CPUState *cs = CPU(hppa_env_get_cpu(env));
    unsigned i, n = 1 << (2 * ent->page_size);
    uint64_t addr = ent->va_b;

    trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);

    for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
        /* Do not flush MMU_PHYS_IDX.  */
        tlb_flush_page_by_mmuidx(cs, addr, 0xf);
    }

    memset(ent, 0, sizeof(*ent));
    ent->va_b = -1;
}

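/* Grab a software TLB slot for a new insertion, evicting whatever is
   there, in simple round-robin order tracked by env->tlb_last.  */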
static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    hppa_tlb_entry *ent;
    uint32_t i = env->tlb_last;

    env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
    ent = &env->tlb[i];

    hppa_flush_tlb_ent(env, ent);
    return ent;
}

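/* Translate ADDR for MMU_IDX and access TYPE (a single PAGE_* bit, or 0
   for a non-architectural access from within QEMU).  On success return -1
   and fill in *pphys and *pprot; on failure return the EXCP_* number that
   the caller should raise.  */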
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot;
    hppa_tlb_entry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);

    /* Map TLB access_rights field to QEMU protection.  */
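    /* PA-RISC privilege level 0 is the most privileged, so a smaller
       mmu_idx means more privilege: reads are allowed up to ar_pl1,
       writes up to ar_pl2, and execution only when the privilege level
       falls within the [ar_pl2, ar_pl1] window.  */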
    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if ((env->psw & PSW_P) && ent->access_id) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

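/* Debug-only translation, used by the gdb stub and monitor memory commands
   rather than by guest accesses; it never raises a guest fault.  */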
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

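/* Refill the QEMU TLB after a miss in the softmmu slow path.  On success
   the translation is installed with tlb_set_page; on failure the guest
   exception is raised and cpu_loop_exit_restore does not return.  */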
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType type, int mmu_idx, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot);
    if (unlikely(excp >= 0)) {
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure.  Raise the indicated exception.  */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);
    /* Success!  Store the translation into the QEMU TLB.  */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
}

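/* The two helpers below implement the two-step architectural TLB insertion:
   ITLBA establishes the virtual-to-physical mapping but leaves the entry
   invalid, and ITLBP then supplies the protection word and marks it valid.  */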
/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way.  */
    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            if (ent->entry_valid) {
                hppa_flush_tlb_ent(env, ent);
            }
            if (!empty) {
                empty = ent;
            }
        }
    }

    /* If we didn't see an empty entry, evict one.  */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already.  */
    empty->va_b = addr & TARGET_PAGE_MASK;
    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (unlikely(ent == NULL)) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }

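    /* Unpack the protection word; the entry only becomes valid (and thus
       usable by hppa_get_physical_address) once all fields are in place.  */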
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
   synchronous across all processors.  */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (ent && ent->entry_valid) {
        hppa_flush_tlb_ent(env, ent);
    }
}

void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));
    CPUState *cpu;
    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));
    trace_hppa_tlb_ptlbe(env);
    memset(env->tlb, 0, sizeof(env->tlb));
    tlb_flush_by_mmuidx(src, 0xf);
}

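/* Called when the guest rewrites a protection-ID (PID) control register.
   With PSW_P set, cached QEMU translations depend on the protection-ID
   match, so they all have to be discarded.  */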
void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    if (env->psw & PSW_P) {
        CPUState *src = CPU(hppa_env_get_cpu(env));
        tlb_flush_by_mmuidx(src, 0xf);
    }
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

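/* LPA (Load Physical Address): translate ADDR with no access-type check
   (type 0).  A translation miss is reported as a non-access TLB miss
   fault rather than a normal DTLB miss.  */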
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}
#endif /* CONFIG_USER_ONLY */