s390x/tcg: We support Vector enhancements facility
[qemu/kevin.git] / target / ppc / mmu-radix64.c
blobb6d191c1d81620493b5127d8a0c610f1bef060a8
1 /*
 * PowerPC Radix MMU emulation helpers for QEMU.
4 * Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "qemu/error-report.h"
24 #include "sysemu/kvm.h"
25 #include "kvm_ppc.h"
26 #include "exec/log.h"
27 #include "internal.h"
28 #include "mmu-radix64.h"
29 #include "mmu-book3s-v3.h"
31 static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
32 vaddr eaddr,
33 uint64_t *lpid, uint64_t *pid)
35 if (msr_hv) { /* MSR[HV] -> Hypervisor/bare metal */
36 switch (eaddr & R_EADDR_QUADRANT) {
37 case R_EADDR_QUADRANT0:
38 *lpid = 0;
39 *pid = env->spr[SPR_BOOKS_PID];
40 break;
41 case R_EADDR_QUADRANT1:
42 *lpid = env->spr[SPR_LPIDR];
43 *pid = env->spr[SPR_BOOKS_PID];
44 break;
45 case R_EADDR_QUADRANT2:
46 *lpid = env->spr[SPR_LPIDR];
47 *pid = 0;
48 break;
49 case R_EADDR_QUADRANT3:
50 *lpid = 0;
51 *pid = 0;
52 break;
53 default:
54 g_assert_not_reached();
56 } else { /* !MSR[HV] -> Guest */
57 switch (eaddr & R_EADDR_QUADRANT) {
58 case R_EADDR_QUADRANT0: /* Guest application */
59 *lpid = env->spr[SPR_LPIDR];
60 *pid = env->spr[SPR_BOOKS_PID];
61 break;
62 case R_EADDR_QUADRANT1: /* Illegal */
63 case R_EADDR_QUADRANT2:
64 return false;
65 case R_EADDR_QUADRANT3: /* Guest OS */
66 *lpid = env->spr[SPR_LPIDR];
67 *pid = 0; /* pid set to 0 -> addresses guest operating system */
68 break;
69 default:
70 g_assert_not_reached();
74 return true;
77 static void ppc_radix64_raise_segi(PowerPCCPU *cpu, MMUAccessType access_type,
78 vaddr eaddr)
80 CPUState *cs = CPU(cpu);
81 CPUPPCState *env = &cpu->env;
83 switch (access_type) {
84 case MMU_INST_FETCH:
85 /* Instruction Segment Interrupt */
86 cs->exception_index = POWERPC_EXCP_ISEG;
87 break;
88 case MMU_DATA_STORE:
89 case MMU_DATA_LOAD:
90 /* Data Segment Interrupt */
91 cs->exception_index = POWERPC_EXCP_DSEG;
92 env->spr[SPR_DAR] = eaddr;
93 break;
94 default:
95 g_assert_not_reached();
97 env->error_code = 0;
100 static void ppc_radix64_raise_si(PowerPCCPU *cpu, MMUAccessType access_type,
101 vaddr eaddr, uint32_t cause)
103 CPUState *cs = CPU(cpu);
104 CPUPPCState *env = &cpu->env;
106 switch (access_type) {
107 case MMU_INST_FETCH:
108 /* Instruction Storage Interrupt */
109 cs->exception_index = POWERPC_EXCP_ISI;
110 env->error_code = cause;
111 break;
112 case MMU_DATA_STORE:
113 cause |= DSISR_ISSTORE;
114 /* fall through */
115 case MMU_DATA_LOAD:
116 /* Data Storage Interrupt */
117 cs->exception_index = POWERPC_EXCP_DSI;
118 env->spr[SPR_DSISR] = cause;
119 env->spr[SPR_DAR] = eaddr;
120 env->error_code = 0;
121 break;
122 default:
123 g_assert_not_reached();
127 static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type,
128 vaddr eaddr, hwaddr g_raddr, uint32_t cause)
130 CPUState *cs = CPU(cpu);
131 CPUPPCState *env = &cpu->env;
133 switch (access_type) {
134 case MMU_INST_FETCH:
135 /* H Instruction Storage Interrupt */
136 cs->exception_index = POWERPC_EXCP_HISI;
137 env->spr[SPR_ASDR] = g_raddr;
138 env->error_code = cause;
139 break;
140 case MMU_DATA_STORE:
141 cause |= DSISR_ISSTORE;
142 /* fall through */
143 case MMU_DATA_LOAD:
144 /* H Data Storage Interrupt */
145 cs->exception_index = POWERPC_EXCP_HDSI;
146 env->spr[SPR_HDSISR] = cause;
147 env->spr[SPR_HDAR] = eaddr;
148 env->spr[SPR_ASDR] = g_raddr;
149 env->error_code = 0;
150 break;
151 default:
152 g_assert_not_reached();
156 static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
157 uint64_t pte, int *fault_cause, int *prot,
158 bool partition_scoped)
160 CPUPPCState *env = &cpu->env;
161 int need_prot;
163 /* Check Page Attributes (pte58:59) */
164 if ((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO && access_type == MMU_INST_FETCH) {
166 * Radix PTE entries with the non-idempotent I/O attribute are treated
167 * as guarded storage
169 *fault_cause |= SRR1_NOEXEC_GUARD;
170 return true;
173 /* Determine permissions allowed by Encoded Access Authority */
174 if (!partition_scoped && (pte & R_PTE_EAA_PRIV) && msr_pr) {
175 *prot = 0;
176 } else if (msr_pr || (pte & R_PTE_EAA_PRIV) || partition_scoped) {
177 *prot = ppc_radix64_get_prot_eaa(pte);
178 } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
179 *prot = ppc_radix64_get_prot_eaa(pte);
180 *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
183 /* Check if requested access type is allowed */
184 need_prot = prot_for_access_type(access_type);
185 if (need_prot & ~*prot) { /* Page Protected for that Access */
186 *fault_cause |= DSISR_PROTFAULT;
187 return true;
190 return false;
193 static void ppc_radix64_set_rc(PowerPCCPU *cpu, MMUAccessType access_type,
194 uint64_t pte, hwaddr pte_addr, int *prot)
196 CPUState *cs = CPU(cpu);
197 uint64_t npte;
199 npte = pte | R_PTE_R; /* Always set reference bit */
201 if (access_type == MMU_DATA_STORE) { /* Store/Write */
202 npte |= R_PTE_C; /* Set change bit */
203 } else {
205 * Treat the page as read-only for now, so that a later write
206 * will pass through this function again to set the C bit.
208 *prot &= ~PAGE_WRITE;
211 if (pte ^ npte) { /* If pte has changed then write it back */
212 stq_phys(cs->as, pte_addr, npte);
216 static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
217 uint64_t *pte_addr, uint64_t *nls,
218 int *psize, uint64_t *pte, int *fault_cause)
220 uint64_t index, pde;
222 if (*nls < 5) { /* Directory maps less than 2**5 entries */
223 *fault_cause |= DSISR_R_BADCONFIG;
224 return 1;
227 /* Read page <directory/table> entry from guest address space */
228 pde = ldq_phys(as, *pte_addr);
229 if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
230 *fault_cause |= DSISR_NOPTE;
231 return 1;
234 *pte = pde;
235 *psize -= *nls;
236 if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
237 *nls = pde & R_PDE_NLS;
238 index = eaddr >> (*psize - *nls); /* Shift */
239 index &= ((1UL << *nls) - 1); /* Mask */
240 *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));
242 return 0;
245 static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
246 uint64_t base_addr, uint64_t nls,
247 hwaddr *raddr, int *psize, uint64_t *pte,
248 int *fault_cause, hwaddr *pte_addr)
250 uint64_t index, pde, rpn , mask;
252 if (nls < 5) { /* Directory maps less than 2**5 entries */
253 *fault_cause |= DSISR_R_BADCONFIG;
254 return 1;
257 index = eaddr >> (*psize - nls); /* Shift */
258 index &= ((1UL << nls) - 1); /* Mask */
259 *pte_addr = base_addr + (index * sizeof(pde));
260 do {
261 int ret;
263 ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde,
264 fault_cause);
265 if (ret) {
266 return ret;
268 } while (!(pde & R_PTE_LEAF));
270 *pte = pde;
271 rpn = pde & R_PTE_RPN;
272 mask = (1UL << *psize) - 1;
274 /* Or high bits of rpn and low bits to ea to form whole real addr */
275 *raddr = (rpn & ~mask) | (eaddr & mask);
276 return 0;
279 static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
281 CPUPPCState *env = &cpu->env;
283 if (!(pate->dw0 & PATE0_HR)) {
284 return false;
286 if (lpid == 0 && !msr_hv) {
287 return false;
289 if ((pate->dw0 & PATE1_R_PRTS) < 5) {
290 return false;
292 /* More checks ... */
293 return true;
296 static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
297 MMUAccessType access_type,
298 vaddr eaddr, hwaddr g_raddr,
299 ppc_v3_pate_t pate,
300 hwaddr *h_raddr, int *h_prot,
301 int *h_page_size, bool pde_addr,
302 bool guest_visible)
304 int fault_cause = 0;
305 hwaddr pte_addr;
306 uint64_t pte;
308 *h_page_size = PRTBE_R_GET_RTS(pate.dw0);
309 /* No valid pte or access denied due to protection */
310 if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
311 pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size,
312 &pte, &fault_cause, &pte_addr) ||
313 ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause, h_prot, true)) {
314 if (pde_addr) { /* address being translated was that of a guest pde */
315 fault_cause |= DSISR_PRTABLE_FAULT;
317 if (guest_visible) {
318 ppc_radix64_raise_hsi(cpu, access_type, eaddr, g_raddr, fault_cause);
320 return 1;
323 if (guest_visible) {
324 ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, h_prot);
327 return 0;
330 static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
331 MMUAccessType access_type,
332 vaddr eaddr, uint64_t pid,
333 ppc_v3_pate_t pate, hwaddr *g_raddr,
334 int *g_prot, int *g_page_size,
335 bool guest_visible)
337 CPUState *cs = CPU(cpu);
338 CPUPPCState *env = &cpu->env;
339 uint64_t offset, size, prtbe_addr, prtbe0, base_addr, nls, index, pte;
340 int fault_cause = 0, h_page_size, h_prot;
341 hwaddr h_raddr, pte_addr;
342 int ret;
344 /* Index Process Table by PID to Find Corresponding Process Table Entry */
345 offset = pid * sizeof(struct prtb_entry);
346 size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
347 if (offset >= size) {
348 /* offset exceeds size of the process table */
349 if (guest_visible) {
350 ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
352 return 1;
354 prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + offset;
356 if (cpu->vhyp) {
357 prtbe0 = ldq_phys(cs->as, prtbe_addr);
358 } else {
360 * Process table addresses are subject to partition-scoped
361 * translation
363 * On a Radix host, the partition-scoped page table for LPID=0
364 * is only used to translate the effective addresses of the
365 * process table entries.
367 ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, prtbe_addr,
368 pate, &h_raddr, &h_prot,
369 &h_page_size, true,
370 guest_visible);
371 if (ret) {
372 return ret;
374 prtbe0 = ldq_phys(cs->as, h_raddr);
377 /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
378 *g_page_size = PRTBE_R_GET_RTS(prtbe0);
379 base_addr = prtbe0 & PRTBE_R_RPDB;
380 nls = prtbe0 & PRTBE_R_RPDS;
381 if (msr_hv || cpu->vhyp) {
383 * Can treat process table addresses as real addresses
385 ret = ppc_radix64_walk_tree(cs->as, eaddr & R_EADDR_MASK, base_addr,
386 nls, g_raddr, g_page_size, &pte,
387 &fault_cause, &pte_addr);
388 if (ret) {
389 /* No valid PTE */
390 if (guest_visible) {
391 ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
393 return ret;
395 } else {
396 uint64_t rpn, mask;
398 index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */
399 index &= ((1UL << nls) - 1); /* Mask */
400 pte_addr = base_addr + (index * sizeof(pte));
403 * Each process table address is subject to a partition-scoped
404 * translation
406 do {
407 ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, pte_addr,
408 pate, &h_raddr, &h_prot,
409 &h_page_size, true,
410 guest_visible);
411 if (ret) {
412 return ret;
415 ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK, &h_raddr,
416 &nls, g_page_size, &pte, &fault_cause);
417 if (ret) {
418 /* No valid pte */
419 if (guest_visible) {
420 ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
422 return ret;
424 pte_addr = h_raddr;
425 } while (!(pte & R_PTE_LEAF));
427 rpn = pte & R_PTE_RPN;
428 mask = (1UL << *g_page_size) - 1;
430 /* Or high bits of rpn and low bits to ea to form whole real addr */
431 *g_raddr = (rpn & ~mask) | (eaddr & mask);
434 if (ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause, g_prot, false)) {
435 /* Access denied due to protection */
436 if (guest_visible) {
437 ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
439 return 1;
442 if (guest_visible) {
443 ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, g_prot);
446 return 0;
450 * Radix tree translation is a 2 steps translation process:
452 * 1. Process-scoped translation: Guest Eff Addr -> Guest Real Addr
453 * 2. Partition-scoped translation: Guest Real Addr -> Host Real Addr
455 * MSR[HV]
456 * +-------------+----------------+---------------+
457 * | | HV = 0 | HV = 1 |
458 * +-------------+----------------+---------------+
459 * | Relocation | Partition | No |
460 * | = Off | Scoped | Translation |
461 * Relocation +-------------+----------------+---------------+
462 * | Relocation | Partition & | Process |
463 * | = On | Process Scoped | Scoped |
464 * +-------------+----------------+---------------+
466 static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr,
467 MMUAccessType access_type,
468 bool relocation,
469 hwaddr *raddr, int *psizep, int *protp,
470 bool guest_visible)
472 CPUPPCState *env = &cpu->env;
473 uint64_t lpid, pid;
474 ppc_v3_pate_t pate;
475 int psize, prot;
476 hwaddr g_raddr;
478 /* Virtual Mode Access - get the fully qualified address */
479 if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
480 if (guest_visible) {
481 ppc_radix64_raise_segi(cpu, access_type, eaddr);
483 return 1;
486 /* Get Process Table */
487 if (cpu->vhyp) {
488 PPCVirtualHypervisorClass *vhc;
489 vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
490 vhc->get_pate(cpu->vhyp, &pate);
491 } else {
492 if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
493 if (guest_visible) {
494 ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
496 return 1;
498 if (!validate_pate(cpu, lpid, &pate)) {
499 if (guest_visible) {
500 ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG);
502 return 1;
506 *psizep = INT_MAX;
507 *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
510 * Perform process-scoped translation if relocation enabled.
512 * - Translates an effective address to a host real address in
513 * quadrants 0 and 3 when HV=1.
515 * - Translates an effective address to a guest real address.
517 if (relocation) {
518 int ret = ppc_radix64_process_scoped_xlate(cpu, access_type, eaddr, pid,
519 pate, &g_raddr, &prot,
520 &psize, guest_visible);
521 if (ret) {
522 return ret;
524 *psizep = MIN(*psizep, psize);
525 *protp &= prot;
526 } else {
527 g_raddr = eaddr & R_EADDR_MASK;
530 if (cpu->vhyp) {
531 *raddr = g_raddr;
532 } else {
534 * Perform partition-scoped translation if !HV or HV access to
535 * quadrants 1 or 2. Translates a guest real address to a host
536 * real address.
538 if (lpid || !msr_hv) {
539 int ret;
541 ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
542 g_raddr, pate, raddr,
543 &prot, &psize, false,
544 guest_visible);
545 if (ret) {
546 return ret;
548 *psizep = MIN(*psizep, psize);
549 *protp &= prot;
550 } else {
551 *raddr = g_raddr;
555 return 0;
558 int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
559 int mmu_idx)
561 CPUState *cs = CPU(cpu);
562 CPUPPCState *env = &cpu->env;
563 int page_size, prot;
564 bool relocation;
565 MMUAccessType access_type;
566 hwaddr raddr;
568 assert(!(msr_hv && cpu->vhyp));
569 assert((rwx == 0) || (rwx == 1) || (rwx == 2));
570 access_type = rwx;
572 relocation = (access_type == MMU_INST_FETCH ? msr_ir : msr_dr);
573 /* HV or virtual hypervisor Real Mode Access */
574 if (!relocation && (msr_hv || cpu->vhyp)) {
575 /* In real mode top 4 effective addr bits (mostly) ignored */
576 raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
578 /* In HV mode, add HRMOR if top EA bit is clear */
579 if (msr_hv || !env->has_hv_mode) {
580 if (!(eaddr >> 63)) {
581 raddr |= env->spr[SPR_HRMOR];
584 tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
585 PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
586 TARGET_PAGE_SIZE);
587 return 0;
591 * Check UPRT (we avoid the check in real mode to deal with
592 * transitional states during kexec.
594 if (!ppc64_use_proc_tbl(cpu)) {
595 qemu_log_mask(LOG_GUEST_ERROR,
596 "LPCR:UPRT not set in radix mode ! LPCR="
597 TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
600 /* Translate eaddr to raddr (where raddr is addr qemu needs for access) */
601 if (ppc_radix64_xlate(cpu, eaddr, access_type, relocation, &raddr,
602 &page_size, &prot, true)) {
603 return 1;
606 tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
607 prot, mmu_idx, 1UL << page_size);
608 return 0;
611 hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
613 CPUPPCState *env = &cpu->env;
614 int psize, prot;
615 hwaddr raddr;
617 /* Handle Real Mode */
618 if ((msr_dr == 0) && (msr_hv || cpu->vhyp)) {
619 /* In real mode top 4 effective addr bits (mostly) ignored */
620 return eaddr & 0x0FFFFFFFFFFFFFFFULL;
623 if (ppc_radix64_xlate(cpu, eaddr, 0, msr_dr, &raddr, &psize,
624 &prot, false)) {
625 return -1;
628 return raddr & TARGET_PAGE_MASK;