/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/hw_accel.h"
#include "mmu-hash64.h"
#include "hw/hw.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "helper_regs.h"

#ifdef CONFIG_TCG
#include "exec/helper-proto.h"
#endif

/* #define DEBUG_SLB */

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /*
         * We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code.
         */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}
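
/*
 * Illustrative example (values are hypothetical): with 256M segments,
 * SEGMENT_MASK_256M clears the low 28 bits of the effective address,
 * so an access to 0x12345678 is matched against an SLBE whose ESID
 * word is 0x10000000 | SLB_ESID_V; with 1T segments the low 40 bits
 * are cleared instead and the same address falls in ESID 0.
 */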

void dump_slb(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    qemu_printf("SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

#ifdef CONFIG_TCG
void helper_SLBIA(CPUPPCState *env, uint32_t ih)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int starting_entry;
    int n;

    /*
     * slbia must always flush all TLB (which is equivalent to ERAT in ppc
     * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
     * can overwrite a valid SLB without flushing its lookaside information.
     *
     * It would be possible to keep the TLB in synch with the SLB by flushing
     * when a valid entry is overwritten by slbmte, and therefore slbia would
     * not have to flush unless it evicts a valid SLB entry. However it is
     * expected that slbmte is more common than slbia, and slbia is usually
     * going to evict valid SLB entries, so that tradeoff is unlikely to be a
     * good one.
     *
     * ISA v2.05 introduced IH field with values 0,1,2,6. These all invalidate
     * the same SLB entries (everything but entry 0), but differ in what
     * "lookaside information" is invalidated. TCG can ignore this and flush
     * everything.
     *
     * ISA v3.0 introduced additional values 3,4,7, which change what SLBs are
     * invalidated.
     */

    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;

    starting_entry = 1; /* default for IH=0,1,2,6 */

    if (env->mmu_model == POWERPC_MMU_3_00) {
        switch (ih) {
        case 0x7:
            /* invalidate no SLBs, but all lookaside information */
            return;

        case 0x3:
        case 0x4:
            /* also considers SLB entry 0 */
            starting_entry = 0;
            break;

        case 0x5:
            /* treat undefined values as ih==0, and warn */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "slbia undefined IH field %u.\n", ih);
            break;

        default:
            /* 0,1,2,6 */
            break;
        }
    }

    for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (!(slb->esid & SLB_ESID_V)) {
            continue;
        }
        if (env->mmu_model == POWERPC_MMU_3_00) {
            if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) {
                /* preserves entries with a class value of 0 */
                continue;
            }
        }

        slb->esid &= ~SLB_ESID_V;
    }
}

#if defined(TARGET_PPC64)
void helper_SLBIAG(CPUPPCState *env, target_ulong rs, uint32_t l)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int n;

    /*
     * slbiag must always flush all TLB (which is equivalent to ERAT in ppc
     * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
     * can overwrite a valid SLB without flushing its lookaside information.
     *
     * It would be possible to keep the TLB in synch with the SLB by flushing
     * when a valid entry is overwritten by slbmte, and therefore slbiag would
     * not have to flush unless it evicts a valid SLB entry. However it is
     * expected that slbmte is more common than slbiag, and slbiag is usually
     * going to evict valid SLB entries, so that tradeoff is unlikely to be a
     * good one.
     */
    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;

    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        slb->esid &= ~SLB_ESID_V;
    }
}
#endif

static void __helper_slbie(CPUPPCState *env, target_ulong addr,
                           target_ulong global)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /*
         * XXX: given the fact that segment size is 256 MB or 1TB,
         *      and we still don't have a tlb_flush_mask(env, n, mask)
         *      in QEMU, we just invalidate all TLBs
         */
        env->tlb_need_flush |=
            (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
    }
}

void helper_SLBIE(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, false);
}

void helper_SLBIEG(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, true);
}
#endif
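
/*
 * slbie only needs to drop this CPU's lookaside information, while
 * slbieg must be visible to other CPUs as well - hence the local
 * vs. global TLB-flush flag selected in __helper_slbie above.
 */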

int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb = &env->slb[slot];
    const PPCHash64SegmentPageSizes *sps = NULL;
    int i;

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1; /* Bad slot number */
    }
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
            " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}
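
/*
 * Illustrative example (hypothetical values): mapping the 256M segment
 * at EA 0x10000000 with 4K base pages would use slot = 0,
 * esid = 0x10000000 | SLB_ESID_V, and a vsid whose low-order LLP bits
 * match one of the slb_enc values in hash64_opts->sps[]; any other LLP
 * encoding is rejected by the loop above.
 */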

#ifdef CONFIG_TCG
static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;

    if (!msr_is_64bit(env, env->msr)) {
        rb &= 0xffffffff;
    }
    slb = slb_lookup(cpu, rb);
    if (slb == NULL) {
        *rt = (target_ulong)-1ul;
    } else {
        *rt = slb->vsid;
    }
    return 0;
}

void helper_SLBMTE(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = env_archcpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
}
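
/*
 * For reference: slbmte carries the ESID, valid bit and SLB index in
 * RB, and the VSID plus flags in RS, which is why the call above
 * splits RB into a slot (low 12 bits) and an ESID word (the rest).
 * E.g. a hypothetical RB = 0x10000000 | SLB_ESID_V | 3 stores
 * ESID 0x10000000 into slot 3.
 */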

target_ulong helper_SLBMFEE(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_SLBFEE(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_SLBMFEV(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}
#endif

/* Check No-Execute or Guarded Storage */
static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
                                              ppc_hash_pte64_t pte)
{
    /* Exec permissions CANNOT take away read or write permissions */
    return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
            PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

/* Check Basic Storage Protection */
static int ppc_hash64_pte_prot(int mmu_idx,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    unsigned pp, key;
    /*
     * Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases
     */
    int prot = 0;

    key = !!(mmuidx_pr(mmu_idx) ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ | PAGE_EXEC;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ | PAGE_EXEC;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        }
    }

    return prot;
}
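
/*
 * Summary of the pp decoding above (descriptive, not quoted from the
 * ISA): with key == 0, pp values 0/1/2 grant read/write/execute and
 * 3/6 grant read/execute only; with key == 1, pp 0/6 grant no access,
 * 1/3 grant read/execute, and 2 grants read/write/execute. Remaining
 * combinations are undefined and fall through to no access.
 */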

/* Check the instruction access permissions specified in the IAMR */
static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
{
    CPUPPCState *env = &cpu->env;
    int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;

    /*
     * An instruction fetch is permitted if the IAMR bit is 0.
     * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
     * can only take away EXEC permissions not READ or WRITE permissions.
     * If bit is cleared return PAGE_READ | PAGE_WRITE | PAGE_EXEC since
     * EXEC permissions are allowed.
     */
    return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
                               PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}
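
/*
 * Worked example for the shift above: each of the 32 storage keys owns
 * a 2-bit field in the (I)AMR, with key 0 in the two most-significant
 * bits. For key = 31 the shift is 2 * (31 - 31) = 0 (the two low
 * bits); for key = 0 it is 2 * 31 = 62 (the two high bits).
 */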

static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    switch (env->mmu_model) {
    /*
     * MMU version 2.07 and later support IAMR
     * Check if the IAMR allows the instruction access - it will return
     * PAGE_EXEC if it doesn't (and thus that bit will be cleared) or 0
     * if it does (and prot will be unchanged indicating execution support).
     */
    case POWERPC_MMU_2_07:
    case POWERPC_MMU_3_00:
        prot &= ppc_hash64_iamr_prot(cpu, key);
        break;
    default:
        break;
    }

    return prot;
}

const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
                                             hwaddr ptex, int n)
{
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
    hwaddr base;
    hwaddr plen = n * HASH_PTE_SIZE_64;
    const ppc_hash_pte64_t *hptes;

    if (cpu->vhyp) {
        return cpu->vhyp_class->map_hptes(cpu->vhyp, ptex, n);
    }
    base = ppc_hash64_hpt_base(cpu);

    if (!base) {
        return NULL;
    }

    hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
                              MEMTXATTRS_UNSPECIFIED);
    if (plen < (n * HASH_PTE_SIZE_64)) {
        hw_error("%s: Unable to map all requested HPTEs\n", __func__);
    }
    return hptes;
}

void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
                            hwaddr ptex, int n)
{
    if (cpu->vhyp) {
        cpu->vhyp_class->unmap_hptes(cpu->vhyp, hptes, ptex, n);
        return;
    }

    address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
                        false, n * HASH_PTE_SIZE_64);
}

static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64PageSize *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}
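
/*
 * Worked example (illustrative): in a 64K segment on POWER7, a 64K
 * HPTE uses pte_enc 0x1 (see ppc_hash64_opts_POWER7 below), so the
 * low sub-page bits of pte1's RPN field must equal
 * 0x1 << HPTE64_R_RPN_SHIFT for the 64K candidate to match; a stale
 * entry with a different encoding fails the comparison, and if no
 * candidate matches the function returns 0.
 */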

static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
{
    /* Insert B into pte0 */
    *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
            ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
             (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));

    /* Remove B from pte1 */
    *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
}
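
/*
 * In other words (descriptive): ISA v3.0 moved the segment-size (B)
 * field from the first HPTE doubleword into the second; the helper
 * above shifts it back into the legacy pte0 position so the rest of
 * this file can keep using the pre-3.0 layout.
 */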

static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     const PPCHash64SegmentPageSizes *sps,
                                     target_ulong ptem,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    int i;
    const ppc_hash_pte64_t *pteg;
    target_ulong pte0, pte1;
    target_ulong ptex;

    ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
    pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
    if (!pteg) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_hpte0(cpu, pteg, i);
        /*
         * pte0 contains the valid bit and must be read before pte1,
         * otherwise we might see an old pte1 with a new valid bit and
         * thus an inconsistent hpte value
         */
        smp_rmb();
        pte1 = ppc_hash64_hpte1(cpu, pteg, i);

        /* Convert format if necessary */
        if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
            ppc64_v3_new_to_old_hpte(&pte0, &pte1);
        }

        /* This compares V, B, H (secondary) and the AVPN */
        if (HPTE64_V_COMPARE(pte0, ptem)) {
            *pshift = hpte_page_shift(sps, pte0, pte1);
            /*
             * If there is no match, ignore the PTE, it could simply
             * be for a different segment size encoding and the
             * architecture specifies we should not match. Linux will
             * potentially leave behind PTEs for the wrong base page
             * size when demoting segments.
             */
            if (*pshift == 0) {
                continue;
            }
            /*
             * We don't do anything with pshift yet as qemu TLB only
             * deals with 4K pages anyway
             */
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
            return ptex + i;
        }
    }
    ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    CPUPPCState *env = &cpu->env;
    hwaddr hash, ptex;
    uint64_t vsid, epnmask, epn, ptem;
    const PPCHash64SegmentPageSizes *sps = slb->sps;

    /*
     * The SLB store path should prevent any bad page size encodings
     * getting in there, so:
     */
    assert(sps);

    /* If ISL is set in LPCR we need to clamp the page size to 4K */
    if (env->spr[SPR_LPCR] & LPCR_ISL) {
        /* We assume that when using TCG, 4k is first entry of SPS */
        sps = &cpu->hash64_opts->sps[0];
        assert(sps->page_shift == 12);
    }

    epnmask = ~((1ULL << sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> sps->page_shift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
    ptem |= HPTE64_V_VALID;

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
                  "htab_base " HWADDR_FMT_plx " htab_mask " HWADDR_FMT_plx
                  " hash " HWADDR_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
                  "0 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
                  " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                  " hash=" HWADDR_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
                  vsid, ptem, hash);
    ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);

    if (ptex == -1) {
        /* Secondary PTEG lookup */
        ptem |= HPTE64_V_SECONDARY;
        qemu_log_mask(CPU_LOG_MMU,
                      "1 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
                      " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                      " hash=" HWADDR_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
                      ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);

        ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
    }

    return ptex;
}
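
/*
 * For reference, the hash computed above: the primary hash is
 * vsid ^ (epn >> page_shift) for 256M segments, with an extra
 * vsid ^ (vsid << 25) fold for 1T segments; the secondary PTEG is
 * always probed at the ones-complement (~hash) with
 * HPTE64_V_SECONDARY set in the match term.
 */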

unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        return 12;
    }

    /*
     * The encodings in env->sps need to be carefully chosen so that
     * this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            return shift;
        }
    }

    return 0;
}

static bool ppc_hash64_use_vrma(CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_3_00:
        /*
         * ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR
         * register no longer exist
         */
        return true;

    default:
        return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
    }
}

static void ppc_hash64_set_isi(CPUState *cs, int mmu_idx, uint64_t slb_vsid,
                               uint64_t error_code)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (!mmuidx_real(mmu_idx)) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        vpm = ppc_hash64_use_vrma(env);
    }
    if (vpm && !mmuidx_hv(mmu_idx)) {
        cs->exception_index = POWERPC_EXCP_HISI;
        env->spr[SPR_ASDR] = slb_vsid;
    } else {
        cs->exception_index = POWERPC_EXCP_ISI;
    }
    env->error_code = error_code;
}

static void ppc_hash64_set_dsi(CPUState *cs, int mmu_idx, uint64_t slb_vsid,
                               uint64_t dar, uint64_t dsisr)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (!mmuidx_real(mmu_idx)) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        vpm = ppc_hash64_use_vrma(env);
    }
    if (vpm && !mmuidx_hv(mmu_idx)) {
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDAR] = dar;
        env->spr[SPR_HDSISR] = dsisr;
        env->spr[SPR_ASDR] = slb_vsid;
    } else {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DAR] = dar;
        env->spr[SPR_DSISR] = dsisr;
    }
    env->error_code = 0;
}

static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R;

    if (cpu->vhyp) {
        cpu->vhyp_class->hpte_set_r(cpu->vhyp, ptex, pte1);
        return;
    }
    base = ppc_hash64_hpt_base(cpu);

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
}

static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C;

    if (cpu->vhyp) {
        cpu->vhyp_class->hpte_set_c(cpu->vhyp, ptex, pte1);
        return;
    }
    base = ppc_hash64_hpt_base(cpu);

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
}
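
/*
 * Note on the two stores above: HPTE64_DW1_R and HPTE64_DW1_C address
 * the bytes of the second PTE doubleword that hold R (bit 0x01 of its
 * byte) and C (bit 0x80 of its byte), so only that single byte is
 * rewritten - mirroring the hardware's non-atomic byte update rather
 * than a full 64-bit read-modify-write of pte1.
 */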

static target_ulong rmls_limit(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    /*
     * In theory the meanings of RMLS values are implementation
     * dependent. In practice, this seems to have been the set from
     * POWER4+..POWER8, and RMLS is no longer supported in POWER9.
     *
     * Unsupported values mean the OS has shot itself in the
     * foot. Return a 0-sized RMA in this case, which we expect
     * to trigger an immediate DSI or ISI
     */
    static const target_ulong rma_sizes[16] = {
        [0] = 256 * GiB,
        [1] = 16 * GiB,
        [2] = 1 * GiB,
        [3] = 64 * MiB,
        [4] = 256 * MiB,
        [7] = 128 * MiB,
        [8] = 32 * MiB,
    };
    target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;

    return rma_sizes[rmls];
}
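
/*
 * Example (assuming the table above): LPCR[RMLS] = 2 selects a 1 GiB
 * real-mode area, while encodings with no table entry yield a 0-sized
 * RMA and so fault on the first real-mode access.
 */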

/* Return the LLP in SLB_VSID format */
static uint64_t get_vrma_llp(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    uint64_t llp;

    if (env->mmu_model == POWERPC_MMU_3_00) {
        ppc_v3_pate_t pate;
        uint64_t ps, l, lp;

        /*
         * ISA v3.0 removes the LPCR[VRMASD] field and puts the VRMA base
         * page size (L||LP equivalent) in the PS field in the HPT partition
         * table entry.
         */
        if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
            error_report("Bad VRMA with no partition table entry");
            return 0;
        }
        ps = PATE0_GET_PS(pate.dw0);
        /* PS has L||LP in 3 consecutive bits, put them into SLB LLP format */
        l = (ps >> 2) & 0x1;
        lp = ps & 0x3;
        llp = (l << SLB_VSID_L_SHIFT) | (lp << SLB_VSID_LP_SHIFT);
    } else {
        uint64_t lpcr = env->spr[SPR_LPCR];
        target_ulong vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;

        /* VRMASD LLP matches SLB format, just shift and mask it */
        llp = (vrmasd << SLB_VSID_LP_SHIFT) & SLB_VSID_LLP_MASK;
    }

    return llp;
}

static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
{
    uint64_t llp = get_vrma_llp(cpu);
    target_ulong vsid = SLB_VSID_VRMA | llp;
    int i;

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];

        if (!sps->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
            slb->esid = SLB_ESID_V;
            slb->vsid = vsid;
            slb->sps = sps;
            return 0;
        }
    }

    error_report("Bad VRMA page size encoding 0x" TARGET_FMT_lx, llp);
    return -1;
}

bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
                      hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
                      bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t vrma_slbe;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr ptex;
    ppc_hash_pte64_t pte;
    int exec_prot, pp_prot, amr_prot, prot;
    int need_prot;
    hwaddr raddr;

    /*
     * Note on LPCR usage: 970 uses HID4, but our special variant of
     * store_spr copies relevant fields into env->spr[SPR_LPCR].
     * Similarly we filter unimplemented bits when storing into LPCR
     * depending on the MMU version. This code can thus just use the
     * LPCR "as-is".
     */

    /* 1. Handle real mode accesses */
    if (mmuidx_real(mmu_idx)) {
        /*
         * Translation is supposedly "off", but in real mode the top 4
         * effective address bits are (mostly) ignored
         */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        if (cpu->vhyp) {
            /*
             * In virtual hypervisor mode, there's nothing to do:
             *   EA == GPA == qemu guest address
             */
        } else if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
            /* In HV mode, add HRMOR if top EA bit is clear */
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
            }
        } else if (ppc_hash64_use_vrma(env)) {
            /* Emulated VRMA mode */
            slb = &vrma_slbe;
            if (build_vrma_slbe(cpu, slb) != 0) {
                /* Invalid VRMA setup, machine check */
                if (guest_visible) {
                    cs->exception_index = POWERPC_EXCP_MCHECK;
                    env->error_code = 0;
                }
                return false;
            }

            goto skip_slb_search;
        } else {
            target_ulong limit = rmls_limit(cpu);

            /* Emulated old-style RMO mode, bounds check against RMLS */
            if (raddr >= limit) {
                if (!guest_visible) {
                    return false;
                }
                switch (access_type) {
                case MMU_INST_FETCH:
                    ppc_hash64_set_isi(cs, mmu_idx, 0, SRR1_PROTFAULT);
                    break;
                case MMU_DATA_LOAD:
                    ppc_hash64_set_dsi(cs, mmu_idx, 0, eaddr, DSISR_PROTFAULT);
                    break;
                case MMU_DATA_STORE:
                    ppc_hash64_set_dsi(cs, mmu_idx, 0, eaddr,
                                       DSISR_PROTFAULT | DSISR_ISSTORE);
                    break;
                default:
                    g_assert_not_reached();
                }
                return false;
            }

            raddr |= env->spr[SPR_RMOR];
        }

        *raddrp = raddr;
        *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);
    if (!slb) {
        /* No entry found, check if in-memory segment tables are in use */
        if (ppc64_use_proc_tbl(cpu)) {
            /* TODO - Unsupported */
            error_report("Segment Table Support Unimplemented");
            exit(1);
        }
        /* Segment still not found, generate the appropriate interrupt */
        if (!guest_visible) {
            return false;
        }
        switch (access_type) {
        case MMU_INST_FETCH:
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
            break;
        case MMU_DATA_LOAD:
        case MMU_DATA_STORE:
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            break;
        default:
            g_assert_not_reached();
        }
        return false;
    }

skip_slb_search:

    /* 3. Check for segment level no-execute violation */
    if (access_type == MMU_INST_FETCH && (slb->vsid & SLB_VSID_N)) {
        if (guest_visible) {
            ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, SRR1_NOEXEC_GUARD);
        }
        return false;
    }

    /* 4. Locate the PTE in the hash table */
    ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
    if (ptex == -1) {
        if (!guest_visible) {
            return false;
        }
        switch (access_type) {
        case MMU_INST_FETCH:
            ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, SRR1_NOPTE);
            break;
        case MMU_DATA_LOAD:
            ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, DSISR_NOPTE);
            break;
        case MMU_DATA_STORE:
            ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr,
                               DSISR_NOPTE | DSISR_ISSTORE);
            break;
        default:
            g_assert_not_reached();
        }
        return false;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at index %08" HWADDR_PRIx "\n", ptex);

    /* 5. Check access permissions */

    exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
    pp_prot = ppc_hash64_pte_prot(mmu_idx, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = exec_prot & pp_prot & amr_prot;

    need_prot = check_prot_access_type(PAGE_RWX, access_type);
    if (need_prot & ~prot) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (!guest_visible) {
            return false;
        }
        if (access_type == MMU_INST_FETCH) {
            int srr1 = 0;
            if (PAGE_EXEC & ~exec_prot) {
                srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
            } else if (PAGE_EXEC & ~pp_prot) {
                srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
            }
            if (PAGE_EXEC & ~amr_prot) {
                srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
            }
            ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, srr1);
        } else {
            int dsisr = 0;
            if (need_prot & ~pp_prot) {
                dsisr |= DSISR_PROTFAULT;
            }
            if (access_type == MMU_DATA_STORE) {
                dsisr |= DSISR_ISSTORE;
            }
            if (need_prot & ~amr_prot) {
                dsisr |= DSISR_AMR;
            }
            ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, dsisr);
        }
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    if (!(pte.pte1 & HPTE64_R_R)) {
        ppc_hash64_set_r(cpu, ptex, pte.pte1);
    }
    if (!(pte.pte1 & HPTE64_R_C)) {
        if (access_type == MMU_DATA_STORE) {
            ppc_hash64_set_c(cpu, ptex, pte.pte1);
        } else {
            /*
             * Treat the page as read-only for now, so that a later write
             * will pass through this function again to set the C bit
             */
            prot &= ~PAGE_WRITE;
        }
    }

    /* 7. Determine the real address from the PTE */

    *raddrp = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
    *protp = prot;
    *psizep = apshift;

    return true;
}

void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}

#ifdef CONFIG_TCG
void helper_store_lpcr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);

    ppc_store_lpcr(cpu, val);
}
#endif

void ppc_hash64_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!pcc->hash64_opts) {
        assert(!mmu_is_64bit(env->mmu_model));
        return;
    }

    cpu->hash64_opts = g_memdup2(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
}

void ppc_hash64_finalize(PowerPCCPU *cpu)
{
    g_free(cpu->hash64_opts);
}

const PPCHash64Options ppc_hash64_opts_basic = {
    .flags = 0,
    .slb_size = 64,
    .sps = {
        { .page_shift = 12, /* 4K */
          .slb_enc = 0,
          .enc = { { .page_shift = 12, .pte_enc = 0 } }
        },
        { .page_shift = 24, /* 16M */
          .slb_enc = 0x100,
          .enc = { { .page_shift = 24, .pte_enc = 0 } }
        },
    },
};

const PPCHash64Options ppc_hash64_opts_POWER7 = {
    .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
    .slb_size = 32,
    .sps = {
        {
            .page_shift = 12, /* 4K */
            .slb_enc = 0,
            .enc = { { .page_shift = 12, .pte_enc = 0 },
                     { .page_shift = 16, .pte_enc = 0x7 },
                     { .page_shift = 24, .pte_enc = 0x38 }, },
        },
        {
            .page_shift = 16, /* 64K */
            .slb_enc = SLB_VSID_64K,
            .enc = { { .page_shift = 16, .pte_enc = 0x1 },
                     { .page_shift = 24, .pte_enc = 0x8 }, },
        },
        {
            .page_shift = 24, /* 16M */
            .slb_enc = SLB_VSID_16M,
            .enc = { { .page_shift = 24, .pte_enc = 0 }, },
        },
        {
            .page_shift = 34, /* 16G */
            .slb_enc = SLB_VSID_16G,
            .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
        },
    }
};