target/s390x: fix pgm irq ilen in translate_pages()
[qemu/ar7.git] / target / s390x / mmu_helper.c
blob 1ad01584b4b19c15e3132269fe8840d3ba4af7a9
/*
 * S390x MMU related functions
 *
 * Copyright (c) 2011 Alexander Graf
 * Copyright (c) 2015 Thomas Huth, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "exec/address-spaces.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "trace.h"
#include "hw/s390x/storage-keys.h"

/* #define DEBUG_S390 */
/* #define DEBUG_S390_PTE */
/* #define DEBUG_S390_STDOUT */

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef DEBUG_S390_PTE
#define PTE_DPRINTF DPRINTF
#else
#define PTE_DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* Fetch/store bits in the translation exception code: */
#define FS_READ  0x800
#define FS_WRITE 0x400

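/*
 * The translation exception code stored by the fault helpers below combines
 * the failing address, one of the fetch/store bits above, 0x4 for protection
 * exceptions, and the PSW address-space-control bits shifted down into the
 * low-order bits (asc >> 46).
 */
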
static void trigger_access_exception(CPUS390XState *env, uint32_t type,
                                     uint32_t ilen, uint64_t tec)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    if (kvm_enabled()) {
        kvm_s390_access_exception(cpu, type, tec);
    } else {
        CPUState *cs = CPU(cpu);
        stq_phys(cs->as, env->psa + offsetof(LowCore, trans_exc_code), tec);
        trigger_pgm_exception(env, type, ilen);
    }
}

static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
                               uint64_t asc, int rw, bool exc)
{
    uint64_t tec;

    tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | 4 | asc >> 46;

    DPRINTF("%s: trans_exc_code=%016" PRIx64 "\n", __func__, tec);

    if (!exc) {
        return;
    }

    trigger_access_exception(env, PGM_PROTECTION, ILEN_AUTO, tec);
}

static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
                               uint32_t type, uint64_t asc, int rw, bool exc)
{
    int ilen = ILEN_AUTO;
    uint64_t tec;

    tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | asc >> 46;

    DPRINTF("%s: trans_exc_code=%016" PRIx64 "\n", __func__, tec);

    if (!exc) {
        return;
    }

    /* Code accesses have an undefined ilc. */
    if (rw == MMU_INST_FETCH) {
        ilen = 2;
    }

    trigger_access_exception(env, type, ilen, tec);
}

/**
 * Translate real address to absolute (= physical)
 * address by taking care of the prefix mapping.
 */
target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr)
{
    if (raddr < 0x2000) {
        return raddr + env->psa;    /* Map the lowcore. */
    } else if (raddr >= env->psa && raddr < env->psa + 0x2000) {
        return raddr - env->psa;    /* Map the 0 page. */
    }
    return raddr;
}

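/*
 * Illustrative example (values assumed, not taken from the source): with a
 * prefix of env->psa == 0x10000, real address 0x500 becomes absolute 0x10500
 * (the lowcore is relocated to the prefix area), while real address 0x10500
 * becomes absolute 0x500 (the prefix area covers absolute page 0).
 */
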
/* Decode page table entry (normal 4KB page) */
static int mmu_translate_pte(CPUS390XState *env, target_ulong vaddr,
                             uint64_t asc, uint64_t pt_entry,
                             target_ulong *raddr, int *flags, int rw, bool exc)
{
    if (pt_entry & _PAGE_INVALID) {
        DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, pt_entry);
        trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw, exc);
        return -1;
    }
    if (pt_entry & _PAGE_RES0) {
        trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw, exc);
        return -1;
    }
    if (pt_entry & _PAGE_RO) {
        *flags &= ~PAGE_WRITE;
    }

    *raddr = pt_entry & _ASCE_ORIGIN;

    PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, pt_entry);

    return 0;
}

/* Decode segment table entry */
static int mmu_translate_segment(CPUS390XState *env, target_ulong vaddr,
                                 uint64_t asc, uint64_t st_entry,
                                 target_ulong *raddr, int *flags, int rw,
                                 bool exc)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint64_t origin, offs, pt_entry;

    if (st_entry & _SEGMENT_ENTRY_RO) {
        *flags &= ~PAGE_WRITE;
    }

    if ((st_entry & _SEGMENT_ENTRY_FC) && (env->cregs[0] & CR0_EDAT)) {
        /* Decode EDAT1 segment frame absolute address (1MB page) */
        *raddr = (st_entry & 0xfffffffffff00000ULL) | (vaddr & 0xfffff);
        PTE_DPRINTF("%s: SEG=0x%" PRIx64 "\n", __func__, st_entry);
        return 0;
    }

    /* Look up 4KB page entry */
    origin = st_entry & _SEGMENT_ENTRY_ORIGIN;
    offs = (vaddr & VADDR_PX) >> 9;
    pt_entry = ldq_phys(cs->as, origin + offs);
    PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
                __func__, origin, offs, pt_entry);
    return mmu_translate_pte(env, vaddr, asc, pt_entry, raddr, flags, rw, exc);
}

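/*
 * Note on the 4KB walk above: page-table entries are 8 bytes wide, so the
 * byte offset into the page table is the page index times 8; assuming
 * VADDR_PX covers the eight page-index bits (0xff000), (vaddr & VADDR_PX) >> 9
 * is equivalent to ((vaddr >> 12) & 0xff) * 8.
 */
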
/* Decode region table entries */
static int mmu_translate_region(CPUS390XState *env, target_ulong vaddr,
                                uint64_t asc, uint64_t entry, int level,
                                target_ulong *raddr, int *flags, int rw,
                                bool exc)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint64_t origin, offs, new_entry;
    const int pchks[4] = {
        PGM_SEGMENT_TRANS, PGM_REG_THIRD_TRANS,
        PGM_REG_SEC_TRANS, PGM_REG_FIRST_TRANS
    };

    PTE_DPRINTF("%s: 0x%" PRIx64 "\n", __func__, entry);

    origin = entry & _REGION_ENTRY_ORIGIN;
    offs = (vaddr >> (17 + 11 * level / 4)) & 0x3ff8;

    new_entry = ldq_phys(cs->as, origin + offs);
    PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
                __func__, origin, offs, new_entry);

    if ((new_entry & _REGION_ENTRY_INV) != 0) {
        DPRINTF("%s: invalid region\n", __func__);
        trigger_page_fault(env, vaddr, pchks[level / 4], asc, rw, exc);
        return -1;
    }

    if ((new_entry & _REGION_ENTRY_TYPE_MASK) != level) {
        trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw, exc);
        return -1;
    }

    if (level == _ASCE_TYPE_SEGMENT) {
        return mmu_translate_segment(env, vaddr, asc, new_entry, raddr, flags,
                                     rw, exc);
    }

    /* Check region table offset and length */
    offs = (vaddr >> (28 + 11 * (level - 4) / 4)) & 3;
    if (offs < ((new_entry & _REGION_ENTRY_TF) >> 6)
        || offs > (new_entry & _REGION_ENTRY_LENGTH)) {
        DPRINTF("%s: invalid offset or len (%" PRIx64 ")\n", __func__,
                new_entry);
        trigger_page_fault(env, vaddr, pchks[level / 4 - 1], asc, rw, exc);
        return -1;
    }

    if ((env->cregs[0] & CR0_EDAT) && (new_entry & _REGION_ENTRY_RO)) {
        *flags &= ~PAGE_WRITE;
    }

    /* yet another region */
    return mmu_translate_region(env, vaddr, asc, new_entry, level - 4,
                                raddr, flags, rw, exc);
}

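/*
 * The level argument steps down by 4 per table type (region-first, -second,
 * -third, then segment), which is why level / 4 indexes pchks[] and
 * level - 4 descends to the next table. The shift 17 + 11 * level / 4 turns
 * the 11-bit table index for that level into a byte offset (index * 8) once
 * masked with 0x3ff8.
 */
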
static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
                              uint64_t asc, uint64_t asce, target_ulong *raddr,
                              int *flags, int rw, bool exc)
{
    int level;
    int r;

    if (asce & _ASCE_REAL_SPACE) {
        /* direct mapping */
        *raddr = vaddr;
        return 0;
    }

    level = asce & _ASCE_TYPE_MASK;
    switch (level) {
    case _ASCE_TYPE_REGION1:
        if ((vaddr >> 62) > (asce & _ASCE_TABLE_LENGTH)) {
            trigger_page_fault(env, vaddr, PGM_REG_FIRST_TRANS, asc, rw, exc);
            return -1;
        }
        break;
    case _ASCE_TYPE_REGION2:
        if (vaddr & 0xffe0000000000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xffe0000000000000ULL\n", __func__, vaddr);
            trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
            return -1;
        }
        if ((vaddr >> 51 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
            trigger_page_fault(env, vaddr, PGM_REG_SEC_TRANS, asc, rw, exc);
            return -1;
        }
        break;
    case _ASCE_TYPE_REGION3:
        if (vaddr & 0xfffffc0000000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xfffffc0000000000ULL\n", __func__, vaddr);
            trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
            return -1;
        }
        if ((vaddr >> 40 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
            trigger_page_fault(env, vaddr, PGM_REG_THIRD_TRANS, asc, rw, exc);
            return -1;
        }
        break;
    case _ASCE_TYPE_SEGMENT:
        if (vaddr & 0xffffffff80000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xffffffff80000000ULL\n", __func__, vaddr);
            trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
            return -1;
        }
        if ((vaddr >> 29 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
            trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw, exc);
            return -1;
        }
        break;
    }

    r = mmu_translate_region(env, vaddr, asc, asce, level, raddr, flags, rw,
                             exc);
    if (rw == MMU_DATA_STORE && !(*flags & PAGE_WRITE)) {
        trigger_prot_fault(env, vaddr, asc, rw, exc);
        return -1;
    }

    return r;
}

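/*
 * The per-type checks above reject addresses that cannot be translated by
 * the designated table type (PGM_ASCE_TYPE) and, via _ASCE_TABLE_LENGTH,
 * indices beyond the designated table length, which is presumably expressed
 * in units of 512 entries (hence the comparison against only the top bits of
 * the table index).
 */
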
/**
 * Translate a virtual (logical) address into a physical (absolute) address.
 * @param vaddr  the virtual address
 * @param rw     0 = read, 1 = write, 2 = code fetch
 * @param asc    address space control (one of the PSW_ASC_* modes)
 * @param raddr  the translated address is stored to this pointer
 * @param flags  the PAGE_READ/WRITE/EXEC flags are stored to this pointer
 * @param exc    true = inject a program check if a fault occurred
 * @return       0 if the translation was successful, -1 if a fault occurred
 */
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
                  target_ulong *raddr, int *flags, bool exc)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    int r = -1;
    uint8_t key;

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    vaddr &= TARGET_PAGE_MASK;

    if (!(env->psw.mask & PSW_MASK_DAT)) {
        *raddr = vaddr;
        r = 0;
        goto out;
    }

    switch (asc) {
    case PSW_ASC_PRIMARY:
        PTE_DPRINTF("%s: asc=primary\n", __func__);
        r = mmu_translate_asce(env, vaddr, asc, env->cregs[1], raddr, flags,
                               rw, exc);
        break;
    case PSW_ASC_HOME:
        PTE_DPRINTF("%s: asc=home\n", __func__);
        r = mmu_translate_asce(env, vaddr, asc, env->cregs[13], raddr, flags,
                               rw, exc);
        break;
    case PSW_ASC_SECONDARY:
        PTE_DPRINTF("%s: asc=secondary\n", __func__);
        /*
         * Instruction: Primary
         * Data: Secondary
         */
        if (rw == MMU_INST_FETCH) {
            r = mmu_translate_asce(env, vaddr, PSW_ASC_PRIMARY, env->cregs[1],
                                   raddr, flags, rw, exc);
            *flags &= ~(PAGE_READ | PAGE_WRITE);
        } else {
            r = mmu_translate_asce(env, vaddr, PSW_ASC_SECONDARY, env->cregs[7],
                                   raddr, flags, rw, exc);
            *flags &= ~(PAGE_EXEC);
        }
        break;
    case PSW_ASC_ACCREG:
    default:
        hw_error("guest switched to unknown asc mode\n");
        break;
    }

 out:
    /* Convert real address -> absolute address */
    *raddr = mmu_real2abs(env, *raddr);

    if (r == 0 && *raddr < ram_size) {
        if (skeyclass->get_skeys(ss, *raddr / TARGET_PAGE_SIZE, 1, &key)) {
            trace_get_skeys_nonzero(r);
            return 0;
        }

        if (*flags & PAGE_READ) {
            key |= SK_R;
        }

        if (*flags & PAGE_WRITE) {
            key |= SK_C;
        }

        if (skeyclass->set_skeys(ss, *raddr / TARGET_PAGE_SIZE, 1, &key)) {
            trace_set_skeys_nonzero(r);
            return 0;
        }
    }

    return r;
}

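/*
 * Illustrative call (a sketch, not taken from this file): resolving a data
 * read in the current address space without injecting a program check on
 * failure:
 *
 *     target_ulong phys;
 *     int prot;
 *     uint64_t asc = env->psw.mask & PSW_MASK_ASC;
 *
 *     if (mmu_translate(env, vaddr, MMU_DATA_LOAD, asc, &phys, &prot,
 *                       false) == 0) {
 *         ... phys now holds the absolute address, prot the PAGE_* flags ...
 *     }
 */
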
/**
 * lowprot_enabled: Check whether low-address protection is enabled
 */
static bool lowprot_enabled(const CPUS390XState *env)
{
    if (!(env->cregs[0] & CR0_LOWPROT)) {
        return false;
    }
    if (!(env->psw.mask & PSW_MASK_DAT)) {
        return true;
    }

    /* Check the private-space control bit */
    switch (env->psw.mask & PSW_MASK_ASC) {
    case PSW_ASC_PRIMARY:
        return !(env->cregs[1] & _ASCE_PRIVATE_SPACE);
    case PSW_ASC_SECONDARY:
        return !(env->cregs[7] & _ASCE_PRIVATE_SPACE);
    case PSW_ASC_HOME:
        return !(env->cregs[13] & _ASCE_PRIVATE_SPACE);
    default:
        /* We don't support access register mode */
        error_report("unsupported addressing mode");
        exit(1);
    }
}

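/*
 * Low-address protection, when enabled, guards the first 512 bytes of the
 * two lowcore pages (effective addresses 0-511 and 4096-4607); the check in
 * translate_pages() below covers exactly these ranges.
 */
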
/**
 * translate_pages: Translate a set of consecutive logical page addresses
 * to absolute addresses
 */
static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
                           target_ulong *pages, bool is_write)
{
    bool lowprot = is_write && lowprot_enabled(&cpu->env);
    uint64_t asc = cpu->env.psw.mask & PSW_MASK_ASC;
    CPUS390XState *env = &cpu->env;
    int ret, i, pflags;

    for (i = 0; i < nr_pages; i++) {
        /* Low-address protection? */
        if (lowprot && (addr < 512 || (addr >= 4096 && addr < 4096 + 512))) {
            trigger_access_exception(env, PGM_PROTECTION, ILEN_AUTO, 0);
            return -EACCES;
        }
        ret = mmu_translate(env, addr, is_write, asc, &pages[i], &pflags, true);
        if (ret) {
            return ret;
        }
        if (!address_space_access_valid(&address_space_memory, pages[i],
                                        TARGET_PAGE_SIZE, is_write)) {
            program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO);
            return -EFAULT;
        }
        addr += TARGET_PAGE_SIZE;
    }

    return 0;
}

/**
 * s390_cpu_virt_mem_rw:
 * @laddr:     the logical start address
 * @ar:        the access register number
 * @hostbuf:   buffer in host memory. NULL = do only checks w/o copying
 * @len:       length that should be transferred
 * @is_write:  true = write, false = read
 * Returns:    0 on success, non-zero if an exception occurred
 *
 * Copy from/to guest memory using logical addresses. Note that we inject a
 * program interrupt in case there is an error while accessing the memory.
 */
int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
                         int len, bool is_write)
{
    int currlen, nr_pages, i;
    target_ulong *pages;
    int ret;

    if (kvm_enabled()) {
        ret = kvm_s390_mem_op(cpu, laddr, ar, hostbuf, len, is_write);
        if (ret >= 0) {
            return ret;
        }
    }

    nr_pages = (((laddr & ~TARGET_PAGE_MASK) + len - 1) >> TARGET_PAGE_BITS)
               + 1;
    pages = g_malloc(nr_pages * sizeof(*pages));

    ret = translate_pages(cpu, laddr, nr_pages, pages, is_write);
    if (ret == 0 && hostbuf != NULL) {
        /* Copy data by stepping through the area page by page */
        for (i = 0; i < nr_pages; i++) {
            currlen = MIN(len, TARGET_PAGE_SIZE - (laddr % TARGET_PAGE_SIZE));
            cpu_physical_memory_rw(pages[i] | (laddr & ~TARGET_PAGE_MASK),
                                   hostbuf, currlen, is_write);
            laddr += currlen;
            hostbuf += currlen;
            len -= currlen;
        }
    }

    g_free(pages);
    return ret;
}
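
/*
 * Illustrative use (a sketch with assumed values, not from this file):
 * reading 16 bytes from a guest logical address into a host buffer; on
 * failure a program interrupt has already been injected and a non-zero
 * value is returned:
 *
 *     uint8_t buf[16];
 *
 *     if (s390_cpu_virt_mem_rw(cpu, laddr, 0, buf, sizeof(buf), false)) {
 *         return;
 *     }
 *
 * The page count above follows from the offset within the first page; with
 * 4KB pages, an offset of 0xff0 and len 0x20 gives
 * ((0xff0 + 0x1f) >> 12) + 1 = 2 pages.
 */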