/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qom/cpu.h"

#ifdef CONFIG_USER_ONLY
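/* User-only emulation has no architectural TLB: report every fault as a
   data memory protection trap and let the caller deliver it.  */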
int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int size, int rw, int mmu_idx)
{
    HPPACPU *cpu = HPPA_CPU(cs);

    /* ??? Test between data page fault and data memory protection trap,
       which would affect si_code.  */
    cs->exception_index = EXCP_DMP;
    cpu->env.cr[CR_IOR] = address;
    return 1;
}
#else
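/* Return the TLB entry covering ADDR, or NULL if no entry matches.  */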
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            return ent;
        }
    }
    return NULL;
}

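/* Flush the QEMU TLB pages covered by ENT (in all non-physical mmu
   indexes) and clear the architectural entry itself.  */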
static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
{
    CPUState *cs = CPU(hppa_env_get_cpu(env));
    unsigned i, n = 1 << (2 * ent->page_size);
    uint64_t addr = ent->va_b;

    for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
        /* Do not flush MMU_PHYS_IDX.  */
        tlb_flush_page_by_mmuidx(cs, addr, 0xf);
    }

    memset(ent, 0, sizeof(*ent));
    ent->va_b = -1;
}

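/* Evict the next entry, in round-robin order, and return it for reuse.  */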
static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    hppa_tlb_entry *ent;
    uint32_t i = env->tlb_last;

    env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
    ent = &env->tlb[i];

    hppa_flush_tlb_ent(env, ent);
    return ent;
}

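/* Translate virtual address ADDR for an access of TYPE in MMU_IDX.
   Fill *PPHYS and *PPROT with the result; return -1 on success, or the
   exception number to be raised on failure.  */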
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot;
    hppa_tlb_entry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        /* ??? Unconditionally report data tlb miss,
           even if this is an instruction fetch.  */
        ret = EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);

    /* Map TLB access_rights field to QEMU protection.  */
    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* ??? Check PSW_P and ent->access_prot.  This can remove PAGE_WRITE.  */

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC ? EXCP_IMP :
               prot & PAGE_READ ? EXCP_DMP : EXCP_DMAR);
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    return ret;
}

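/* Translate ADDR for a debugger access; protection failures are ignored.  */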
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

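/* Called by the softmmu core on a QEMU TLB miss: translate ADDR and
   install the page, or raise the architectural fault.  */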
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType type, int mmu_idx, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx,
                                     a_prot, &phys, &prot);
    if (unlikely(excp >= 0)) {
        /* Failure.  Raise the indicated exception.  */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    /* Success!  Store the translation into the QEMU TLB.  */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way.  */
    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (!ent->entry_valid) {
            empty = ent;
        } else if (ent->va_b <= addr && addr <= ent->va_e) {
            hppa_flush_tlb_ent(env, ent);
            empty = ent;
        }
    }

    /* If we didn't see an empty entry, evict one.  */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already.  */
    empty->va_b = addr & TARGET_PAGE_MASK;
    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (unlikely(ent == NULL || ent->entry_valid)) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }

    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
}

/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
   synchronous across all processors.  */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (ent && ent->entry_valid) {
        hppa_flush_tlb_ent(env, ent);
    }
}

void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));
    CPUState *cpu;
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));

    memset(env->tlb, 0, sizeof(env->tlb));
    tlb_flush_by_mmuidx(src, 0xf);
}

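/* Helper for the LPA (Load Physical Address) instruction: translate ADDR,
   raising the appropriate fault if the translation fails.  */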
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        hppa_dynamic_excp(env, excp, GETPC());
    }
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}
#endif /* CONFIG_USER_ONLY */