/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
//#define DEBUG_SLB

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

/*
 * Used to indicate whether we have allocated htab in the
 * host kernel
 */
bool kvmppc_kern_htab;
static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}
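
/*
 * Worked example (illustrative values, not from the original source):
 * each valid SLB entry is matched under both segment-size
 * interpretations.  For eaddr 0x12345678, the 256MiB candidate ESID is
 * (0x12345678 & SEGMENT_MASK_256M) | SLB_ESID_V, i.e. segment base
 * 0x10000000 plus the valid bit; the entry only matches if its VSID B
 * field also marks it as a 256MiB segment.
 */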
void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < env->slb_nr; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}
void helper_slbia(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    int n, do_invalidate;

    do_invalidate = 0;
    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /* XXX: given the fact that segment size is 256 MB or 1TB,
             *      and we still don't have a tlb_flush_mask(env, n, mask)
             *      in QEMU, we just invalidate all TLBs
             */
            do_invalidate = 1;
        }
    }
    if (do_invalidate) {
        tlb_flush(CPU(cpu), 1);
    }
}
void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /* XXX: given the fact that segment size is 256 MB or 1TB,
         *      and we still don't have a tlb_flush_mask(env, n, mask)
         *      in QEMU, we just invalidate all TLBs
         */
        tlb_flush(CPU(cpu), 1);
    }
}
int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb = &env->slb[slot];
    const struct ppc_one_seg_page_size *sps = NULL;
    int i;

    if (slot >= env->slb_nr) {
        return -1; /* Bad slot number */
    }
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}
static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}
static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}
void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}
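
/*
 * Illustrative note (not from the original source): for slbmte, the
 * low 12 bits of RB select the SLB slot while the remaining high bits
 * carry the ESID and valid bit, hence the rb & 0xfff / rb & ~0xfffULL
 * split above; RS supplies the VSID and its flags.
 */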
target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}
target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}
/*
 * 64-bit hash table MMU handling
 */
static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;
    /* Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases */
    int prot = 0;

    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            prot = 0;
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;
        }
    }

    /* No execute if either noexec or guarded bits set */
    if (!(pte.pte1 & HPTE64_R_N) && !(pte.pte1 & HPTE64_R_G)
        && !(slb->vsid & SLB_VSID_N)) {
        prot |= PAGE_EXEC;
    }

    return prot;
}
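
/*
 * Worked example (illustrative, values assumed): with MSR[PR] = 1 and
 * SLB_VSID_KP set in the segment's VSID, key = 1; a PTE with pp = 0
 * then yields prot = 0 (no access), while the same PTE under key = 0
 * would yield PAGE_READ | PAGE_WRITE.
 */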
static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!(env->mmu_model & POWERPC_MMU_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2*(31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    return prot;
}
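
/*
 * Worked example (illustrative, values assumed): for page class key 0
 * the pair of control bits sits at the top of the AMR, so
 * amrbits = (AMR >> 62) & 3.  With AMR = 0xC000000000000000ULL, key-0
 * pages lose both read and write permission; other keys are unaffected.
 */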
uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
{
    uint64_t token = 0;
    hwaddr pte_offset;

    pte_offset = pte_index * HASH_PTE_SIZE_64;
    if (kvmppc_kern_htab) {
        /*
         * HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
         */
        token = kvmppc_hash64_read_pteg(cpu, pte_index);
        if (token) {
            return token;
        }
        /*
         * pteg read failed, even though we have allocated htab via
         * kvmppc_reset_htab.
         */
        return 0;
    } else {
        /*
         * HTAB is controlled by QEMU. Just point to the internally
         * accessible PTEG.
         */
        if (cpu->env.external_htab) {
            token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
        } else if (cpu->env.htab_base) {
            token = cpu->env.htab_base + pte_offset;
        }
    }
    return token;
}

void ppc_hash64_stop_access(uint64_t token)
{
    if (kvmppc_kern_htab) {
        kvmppc_hash64_free_pteg(token);
    }
}
static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte64_t *pte)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t token;
    target_ulong pte0, pte1;
    target_ulong pte_index;

    pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
    token = ppc_hash64_start_access(cpu, pte_index);
    if (!token) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_load_hpte0(cpu, token, i);
        pte1 = ppc_hash64_load_hpte1(cpu, token, i);

        if ((pte0 & HPTE64_V_VALID)
            && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
            && HPTE64_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_stop_access(token);
            return (pte_index + i) * HASH_PTE_SIZE_64;
        }
    }
    ppc_hash64_stop_access(token);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}
static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte)
{
    CPUPPCState *env = &cpu->env;
    hwaddr pte_offset;
    hwaddr hash;
    uint64_t vsid, epnmask, epn, ptem;

    /* The SLB store path should prevent any bad page size encodings
     * getting in there, so: */
    assert(slb->sps);

    epnmask = ~((1ULL << slb->sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> slb->sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> slb->sps->page_shift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
                  "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
                  " hash " TARGET_FMT_plx "\n",
                  env->htab_base, env->htab_mask, hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
                  "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                  " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                  " hash=" TARGET_FMT_plx "\n",
                  env->htab_base, env->htab_mask, vsid, ptem, hash);
    pte_offset = ppc_hash64_pteg_search(cpu, hash, 0, ptem, pte);

    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        qemu_log_mask(CPU_LOG_MMU,
                      "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                      " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                      " hash=" TARGET_FMT_plx "\n", env->htab_base,
                      env->htab_mask, vsid, ptem, ~hash);

        pte_offset = ppc_hash64_pteg_search(cpu, ~hash, 1, ptem, pte);
    }

    return pte_offset;
}
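
/*
 * Worked example (illustrative, values assumed): for a 256MiB segment
 * with 4KiB pages (page_shift = 12), vsid = 0x123 and a segment offset
 * of 0x5000, epn >> 12 = 0x5 and the primary hash is
 * 0x123 ^ 0x5 = 0x126.  The secondary PTEG is probed at the bitwise
 * complement, ~0x126, and each group holds HPTES_PER_GROUP (8)
 * consecutive HPTEs.
 */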
static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_page_size *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == (ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}
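
/*
 * Illustrative note (example values, not from the original source):
 * when HPTE64_V_LARGE is set, the actual page size is recovered from
 * the low-order bits of the RPN field.  For a segment that permits
 * 64KiB pages, the pte1 bits below bit 16 (within HPTE64_R_RPN) must
 * equal that size's pte_enc value shifted by HPTE64_R_RPN_SHIFT for
 * the match to succeed.
 */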
unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1,
                                          unsigned *seg_page_shift)
{
    CPUPPCState *env = &cpu->env;
    int i;
    unsigned shift;

    if (!(pte0 & HPTE64_V_LARGE)) {
        *seg_page_shift = 12;
        return 12;
    }

    /*
     * The encodings in env->sps need to be carefully chosen so that
     * this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps = &env->sps.sps[i];

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            *seg_page_shift = sps->page_shift;
            return shift;
        }
    }

    *seg_page_shift = 0;
    return 0;
}
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    int pp_prot, amr_prot, prot;
    uint64_t new_pte1;
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is off */
        /* In real mode the top 4 effective address bits are ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);

    if (!slb) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte);
    if (pte_offset == -1) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (rwx == 1) {
                env->spr[SPR_DSISR] = 0x42000000;
            } else {
                env->spr[SPR_DSISR] = 0x40000000;
            }
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* Validate page size encoding */
    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
    if (!apshift) {
        error_report("Bad page size encoding in HPTE 0x%"PRIx64" - 0x%"PRIx64
                     " @ 0x%"HWADDR_PRIx, pte.pte0, pte.pte1, pte_offset);
        /* Not entirely sure what the right action is here, but machine
         * check seems reasonable */
        cs->exception_index = POWERPC_EXCP_MCHECK;
        env->error_code = 0;
        return 1;
    }

    /* 5. Check access permissions */

    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            target_ulong dsisr = 0;

            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= 0x08000000;
            }
            if (rwx == 1) {
                dsisr |= 0x02000000;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= 0x00200000;
            }
            env->spr[SPR_DSISR] = dsisr;
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted!\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }

    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte(cpu, pte_offset / HASH_PTE_SIZE_64,
                              pte.pte0, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1ULL << apshift);

    return 0;
}
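
/*
 * Illustrative note (example values, not from the original source):
 * deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr) keeps the page
 * frame bits from the PTE and substitutes the low apshift bits of the
 * effective address as the byte offset.  With apshift = 12, an RPN of
 * 0x20000000 and an eaddr page offset of 0x345, raddr is 0x20000345.
 */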
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    unsigned apshift;

    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        return addr & 0x0FFFFFFFFFFFFFFFULL;
    }

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return -1;
    }

    pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte);
    if (pte_offset == -1) {
        return -1;
    }

    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
    if (!apshift) {
        return -1;
    }

    return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
        & TARGET_PAGE_MASK;
}
void ppc_hash64_store_hpte(PowerPCCPU *cpu,
                           target_ulong pte_index,
                           target_ulong pte0, target_ulong pte1)
{
    CPUPPCState *env = &cpu->env;

    if (kvmppc_kern_htab) {
        kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
        return;
    }

    pte_index *= HASH_PTE_SIZE_64;
    if (env->external_htab) {
        stq_p(env->external_htab + pte_index, pte0);
        stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
    } else {
        stq_phys(CPU(cpu)->as, env->htab_base + pte_index, pte0);
        stq_phys(CPU(cpu)->as,
                 env->htab_base + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
    }
}
void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
                               target_ulong pte_index,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    tlb_flush(CPU(cpu), 1);
}