block: Include filters when freezing backing chain
[qemu/kevin.git] / target / ppc / mmu-radix64.c
blobc60bf3135734fa421e19b35087f0df1cd0597ce8
1 /*
 * PowerPC Radix MMU emulation helpers for QEMU.
4 * Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "exec/helper-proto.h"
24 #include "qemu/error-report.h"
25 #include "sysemu/kvm.h"
26 #include "kvm_ppc.h"
27 #include "exec/log.h"
28 #include "mmu-radix64.h"
29 #include "mmu-book3s-v3.h"
31 static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
32 vaddr eaddr,
33 uint64_t *lpid, uint64_t *pid)
35 if (msr_hv) { /* MSR[HV] -> Hypervisor/bare metal */
36 switch (eaddr & R_EADDR_QUADRANT) {
37 case R_EADDR_QUADRANT0:
38 *lpid = 0;
39 *pid = env->spr[SPR_BOOKS_PID];
40 break;
41 case R_EADDR_QUADRANT1:
42 *lpid = env->spr[SPR_LPIDR];
43 *pid = env->spr[SPR_BOOKS_PID];
44 break;
45 case R_EADDR_QUADRANT2:
46 *lpid = env->spr[SPR_LPIDR];
47 *pid = 0;
48 break;
49 case R_EADDR_QUADRANT3:
50 *lpid = 0;
51 *pid = 0;
52 break;
53 default:
54 g_assert_not_reached();
56 } else { /* !MSR[HV] -> Guest */
57 switch (eaddr & R_EADDR_QUADRANT) {
58 case R_EADDR_QUADRANT0: /* Guest application */
59 *lpid = env->spr[SPR_LPIDR];
60 *pid = env->spr[SPR_BOOKS_PID];
61 break;
62 case R_EADDR_QUADRANT1: /* Illegal */
63 case R_EADDR_QUADRANT2:
64 return false;
65 case R_EADDR_QUADRANT3: /* Guest OS */
66 *lpid = env->spr[SPR_LPIDR];
67 *pid = 0; /* pid set to 0 -> addresses guest operating system */
68 break;
69 default:
70 g_assert_not_reached();
74 return true;
77 static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr)
79 CPUState *cs = CPU(cpu);
80 CPUPPCState *env = &cpu->env;
82 if (rwx == 2) { /* Instruction Segment Interrupt */
83 cs->exception_index = POWERPC_EXCP_ISEG;
84 } else { /* Data Segment Interrupt */
85 cs->exception_index = POWERPC_EXCP_DSEG;
86 env->spr[SPR_DAR] = eaddr;
88 env->error_code = 0;
91 static void ppc_radix64_raise_si(PowerPCCPU *cpu, int rwx, vaddr eaddr,
92 uint32_t cause)
94 CPUState *cs = CPU(cpu);
95 CPUPPCState *env = &cpu->env;
97 if (rwx == 2) { /* Instruction Storage Interrupt */
98 cs->exception_index = POWERPC_EXCP_ISI;
99 env->error_code = cause;
100 } else { /* Data Storage Interrupt */
101 cs->exception_index = POWERPC_EXCP_DSI;
102 if (rwx == 1) { /* Write -> Store */
103 cause |= DSISR_ISSTORE;
105 env->spr[SPR_DSISR] = cause;
106 env->spr[SPR_DAR] = eaddr;
107 env->error_code = 0;
111 static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, int rwx, vaddr eaddr,
112 hwaddr g_raddr, uint32_t cause)
114 CPUState *cs = CPU(cpu);
115 CPUPPCState *env = &cpu->env;
117 if (rwx == 2) { /* H Instruction Storage Interrupt */
118 cs->exception_index = POWERPC_EXCP_HISI;
119 env->spr[SPR_ASDR] = g_raddr;
120 env->error_code = cause;
121 } else { /* H Data Storage Interrupt */
122 cs->exception_index = POWERPC_EXCP_HDSI;
123 if (rwx == 1) { /* Write -> Store */
124 cause |= DSISR_ISSTORE;
126 env->spr[SPR_HDSISR] = cause;
127 env->spr[SPR_HDAR] = eaddr;
128 env->spr[SPR_ASDR] = g_raddr;
129 env->error_code = 0;
133 static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte,
134 int *fault_cause, int *prot,
135 bool partition_scoped)
137 CPUPPCState *env = &cpu->env;
138 const int need_prot[] = { PAGE_READ, PAGE_WRITE, PAGE_EXEC };
140 /* Check Page Attributes (pte58:59) */
141 if (((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO) && (rwx == 2)) {
143 * Radix PTE entries with the non-idempotent I/O attribute are treated
144 * as guarded storage
146 *fault_cause |= SRR1_NOEXEC_GUARD;
147 return true;
150 /* Determine permissions allowed by Encoded Access Authority */
151 if (!partition_scoped && (pte & R_PTE_EAA_PRIV) && msr_pr) {
152 *prot = 0;
153 } else if (msr_pr || (pte & R_PTE_EAA_PRIV) || partition_scoped) {
154 *prot = ppc_radix64_get_prot_eaa(pte);
155 } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
156 *prot = ppc_radix64_get_prot_eaa(pte);
157 *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
160 /* Check if requested access type is allowed */
161 if (need_prot[rwx] & ~(*prot)) { /* Page Protected for that Access */
162 *fault_cause |= DSISR_PROTFAULT;
163 return true;
166 return false;
169 static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte,
170 hwaddr pte_addr, int *prot)
172 CPUState *cs = CPU(cpu);
173 uint64_t npte;
175 npte = pte | R_PTE_R; /* Always set reference bit */
177 if (rwx == 1) { /* Store/Write */
178 npte |= R_PTE_C; /* Set change bit */
179 } else {
181 * Treat the page as read-only for now, so that a later write
182 * will pass through this function again to set the C bit.
184 *prot &= ~PAGE_WRITE;
187 if (pte ^ npte) { /* If pte has changed then write it back */
188 stq_phys(cs->as, pte_addr, npte);
192 static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
193 uint64_t *pte_addr, uint64_t *nls,
194 int *psize, uint64_t *pte, int *fault_cause)
196 uint64_t index, pde;
198 if (*nls < 5) { /* Directory maps less than 2**5 entries */
199 *fault_cause |= DSISR_R_BADCONFIG;
200 return 1;
203 /* Read page <directory/table> entry from guest address space */
204 pde = ldq_phys(as, *pte_addr);
205 if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
206 *fault_cause |= DSISR_NOPTE;
207 return 1;
210 *pte = pde;
211 *psize -= *nls;
212 if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
213 *nls = pde & R_PDE_NLS;
214 index = eaddr >> (*psize - *nls); /* Shift */
215 index &= ((1UL << *nls) - 1); /* Mask */
216 *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));
218 return 0;
221 static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
222 uint64_t base_addr, uint64_t nls,
223 hwaddr *raddr, int *psize, uint64_t *pte,
224 int *fault_cause, hwaddr *pte_addr)
226 uint64_t index, pde, rpn , mask;
228 if (nls < 5) { /* Directory maps less than 2**5 entries */
229 *fault_cause |= DSISR_R_BADCONFIG;
230 return 1;
233 index = eaddr >> (*psize - nls); /* Shift */
234 index &= ((1UL << nls) - 1); /* Mask */
235 *pte_addr = base_addr + (index * sizeof(pde));
236 do {
237 int ret;
239 ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde,
240 fault_cause);
241 if (ret) {
242 return ret;
244 } while (!(pde & R_PTE_LEAF));
246 *pte = pde;
247 rpn = pde & R_PTE_RPN;
248 mask = (1UL << *psize) - 1;
250 /* Or high bits of rpn and low bits to ea to form whole real addr */
251 *raddr = (rpn & ~mask) | (eaddr & mask);
252 return 0;
255 static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
257 CPUPPCState *env = &cpu->env;
259 if (!(pate->dw0 & PATE0_HR)) {
260 return false;
262 if (lpid == 0 && !msr_hv) {
263 return false;
265 if ((pate->dw0 & PATE1_R_PRTS) < 5) {
266 return false;
268 /* More checks ... */
269 return true;
272 static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu, int rwx,
273 vaddr eaddr, hwaddr g_raddr,
274 ppc_v3_pate_t pate,
275 hwaddr *h_raddr, int *h_prot,
276 int *h_page_size, bool pde_addr,
277 bool guest_visible)
279 int fault_cause = 0;
280 hwaddr pte_addr;
281 uint64_t pte;
283 *h_page_size = PRTBE_R_GET_RTS(pate.dw0);
284 /* No valid pte or access denied due to protection */
285 if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
286 pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size,
287 &pte, &fault_cause, &pte_addr) ||
288 ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, h_prot, true)) {
289 if (pde_addr) { /* address being translated was that of a guest pde */
290 fault_cause |= DSISR_PRTABLE_FAULT;
292 if (guest_visible) {
293 ppc_radix64_raise_hsi(cpu, rwx, eaddr, g_raddr, fault_cause);
295 return 1;
298 if (guest_visible) {
299 ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, h_prot);
302 return 0;
305 static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx,
306 vaddr eaddr, uint64_t pid,
307 ppc_v3_pate_t pate, hwaddr *g_raddr,
308 int *g_prot, int *g_page_size,
309 bool guest_visible)
311 CPUState *cs = CPU(cpu);
312 CPUPPCState *env = &cpu->env;
313 uint64_t offset, size, prtbe_addr, prtbe0, base_addr, nls, index, pte;
314 int fault_cause = 0, h_page_size, h_prot;
315 hwaddr h_raddr, pte_addr;
316 int ret;
318 /* Index Process Table by PID to Find Corresponding Process Table Entry */
319 offset = pid * sizeof(struct prtb_entry);
320 size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
321 if (offset >= size) {
322 /* offset exceeds size of the process table */
323 if (guest_visible) {
324 ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
326 return 1;
328 prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + offset;
330 if (cpu->vhyp) {
331 prtbe0 = ldq_phys(cs->as, prtbe_addr);
332 } else {
334 * Process table addresses are subject to partition-scoped
335 * translation
337 * On a Radix host, the partition-scoped page table for LPID=0
338 * is only used to translate the effective addresses of the
339 * process table entries.
341 ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, prtbe_addr,
342 pate, &h_raddr, &h_prot,
343 &h_page_size, true,
344 guest_visible);
345 if (ret) {
346 return ret;
348 prtbe0 = ldq_phys(cs->as, h_raddr);
351 /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
352 *g_page_size = PRTBE_R_GET_RTS(prtbe0);
353 base_addr = prtbe0 & PRTBE_R_RPDB;
354 nls = prtbe0 & PRTBE_R_RPDS;
355 if (msr_hv || cpu->vhyp) {
357 * Can treat process table addresses as real addresses
359 ret = ppc_radix64_walk_tree(cs->as, eaddr & R_EADDR_MASK, base_addr,
360 nls, g_raddr, g_page_size, &pte,
361 &fault_cause, &pte_addr);
362 if (ret) {
363 /* No valid PTE */
364 if (guest_visible) {
365 ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
367 return ret;
369 } else {
370 uint64_t rpn, mask;
372 index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */
373 index &= ((1UL << nls) - 1); /* Mask */
374 pte_addr = base_addr + (index * sizeof(pte));
377 * Each process table address is subject to a partition-scoped
378 * translation
380 do {
381 ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, pte_addr,
382 pate, &h_raddr, &h_prot,
383 &h_page_size, true,
384 guest_visible);
385 if (ret) {
386 return ret;
389 ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK, &h_raddr,
390 &nls, g_page_size, &pte, &fault_cause);
391 if (ret) {
392 /* No valid pte */
393 if (guest_visible) {
394 ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
396 return ret;
398 pte_addr = h_raddr;
399 } while (!(pte & R_PTE_LEAF));
401 rpn = pte & R_PTE_RPN;
402 mask = (1UL << *g_page_size) - 1;
404 /* Or high bits of rpn and low bits to ea to form whole real addr */
405 *g_raddr = (rpn & ~mask) | (eaddr & mask);
408 if (ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, g_prot, false)) {
409 /* Access denied due to protection */
410 if (guest_visible) {
411 ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
413 return 1;
416 if (guest_visible) {
417 ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, g_prot);
420 return 0;
/*
 * Radix tree translation is a 2 steps translation process:
 *
 * 1. Process-scoped translation:   Guest Eff Addr  -> Guest Real Addr
 * 2. Partition-scoped translation: Guest Real Addr -> Host Real Addr
 *
 *                   MSR[HV]
 *              +-------------+----------------+---------------+
 *              |             |     HV = 0     |     HV = 1    |
 *              +-------------+----------------+---------------+
 *              | Relocation  |    Partition   |      No       |
 *              | = Off       |     Scoped     |  Translation  |
 *  Relocation  +-------------+----------------+---------------+
 *              | Relocation  |   Partition &  |    Process    |
 *              | = On        | Process Scoped |    Scoped     |
 *              +-------------+----------------+---------------+
 */
440 static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx,
441 bool relocation,
442 hwaddr *raddr, int *psizep, int *protp,
443 bool guest_visible)
445 CPUPPCState *env = &cpu->env;
446 uint64_t lpid, pid;
447 ppc_v3_pate_t pate;
448 int psize, prot;
449 hwaddr g_raddr;
451 /* Virtual Mode Access - get the fully qualified address */
452 if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
453 if (guest_visible) {
454 ppc_radix64_raise_segi(cpu, rwx, eaddr);
456 return 1;
459 /* Get Process Table */
460 if (cpu->vhyp) {
461 PPCVirtualHypervisorClass *vhc;
462 vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
463 vhc->get_pate(cpu->vhyp, &pate);
464 } else {
465 if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
466 if (guest_visible) {
467 ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
469 return 1;
471 if (!validate_pate(cpu, lpid, &pate)) {
472 if (guest_visible) {
473 ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG);
475 return 1;
479 *psizep = INT_MAX;
480 *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
483 * Perform process-scoped translation if relocation enabled.
485 * - Translates an effective address to a host real address in
486 * quadrants 0 and 3 when HV=1.
488 * - Translates an effective address to a guest real address.
490 if (relocation) {
491 int ret = ppc_radix64_process_scoped_xlate(cpu, rwx, eaddr, pid,
492 pate, &g_raddr, &prot,
493 &psize, guest_visible);
494 if (ret) {
495 return ret;
497 *psizep = MIN(*psizep, psize);
498 *protp &= prot;
499 } else {
500 g_raddr = eaddr & R_EADDR_MASK;
503 if (cpu->vhyp) {
504 *raddr = g_raddr;
505 } else {
507 * Perform partition-scoped translation if !HV or HV access to
508 * quadrants 1 or 2. Translates a guest real address to a host
509 * real address.
511 if (lpid || !msr_hv) {
512 int ret;
514 ret = ppc_radix64_partition_scoped_xlate(cpu, rwx, eaddr, g_raddr,
515 pate, raddr, &prot, &psize,
516 false, guest_visible);
517 if (ret) {
518 return ret;
520 *psizep = MIN(*psizep, psize);
521 *protp &= prot;
522 } else {
523 *raddr = g_raddr;
527 return 0;
530 int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
531 int mmu_idx)
533 CPUState *cs = CPU(cpu);
534 CPUPPCState *env = &cpu->env;
535 int page_size, prot;
536 bool relocation;
537 hwaddr raddr;
539 assert(!(msr_hv && cpu->vhyp));
540 assert((rwx == 0) || (rwx == 1) || (rwx == 2));
542 relocation = ((rwx == 2) && (msr_ir == 1)) || ((rwx != 2) && (msr_dr == 1));
543 /* HV or virtual hypervisor Real Mode Access */
544 if (!relocation && (msr_hv || cpu->vhyp)) {
545 /* In real mode top 4 effective addr bits (mostly) ignored */
546 raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
548 /* In HV mode, add HRMOR if top EA bit is clear */
549 if (msr_hv || !env->has_hv_mode) {
550 if (!(eaddr >> 63)) {
551 raddr |= env->spr[SPR_HRMOR];
554 tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
555 PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
556 TARGET_PAGE_SIZE);
557 return 0;
561 * Check UPRT (we avoid the check in real mode to deal with
562 * transitional states during kexec.
564 if (!ppc64_use_proc_tbl(cpu)) {
565 qemu_log_mask(LOG_GUEST_ERROR,
566 "LPCR:UPRT not set in radix mode ! LPCR="
567 TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
570 /* Translate eaddr to raddr (where raddr is addr qemu needs for access) */
571 if (ppc_radix64_xlate(cpu, eaddr, rwx, relocation, &raddr,
572 &page_size, &prot, true)) {
573 return 1;
576 tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
577 prot, mmu_idx, 1UL << page_size);
578 return 0;
581 hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
583 CPUPPCState *env = &cpu->env;
584 int psize, prot;
585 hwaddr raddr;
587 /* Handle Real Mode */
588 if ((msr_dr == 0) && (msr_hv || cpu->vhyp)) {
589 /* In real mode top 4 effective addr bits (mostly) ignored */
590 return eaddr & 0x0FFFFFFFFFFFFFFFULL;
593 if (ppc_radix64_xlate(cpu, eaddr, 0, msr_dr, &raddr, &psize,
594 &prot, false)) {
595 return -1;
598 return raddr & TARGET_PAGE_MASK;