/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"
#include "hw/hw.h"
#include "mmu-book3s-v3.h"
#include "helper_regs.h"
/* #define DEBUG_SLB */

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif
/*
 * SLB handling
 */

static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /*
         * We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code.
         */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}
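/*
 * slb_lookup() returns NULL when no valid SLB entry covers the effective
 * address; callers either treat that as "no segment" (the slbie path) or
 * raise a segment interrupt (the fault path further below).
 */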
void dump_slb(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    qemu_printf("SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}
void helper_slbia(CPUPPCState *env, uint32_t ih)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int starting_entry;
    int n;

    /*
     * slbia must always flush all TLB (which is equivalent to ERAT in ppc
     * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
     * can overwrite a valid SLB without flushing its lookaside information.
     *
     * It would be possible to keep the TLB in synch with the SLB by flushing
     * when a valid entry is overwritten by slbmte, and therefore slbia would
     * not have to flush unless it evicts a valid SLB entry. However it is
     * expected that slbmte is more common than slbia, and slbia is usually
     * going to evict valid SLB entries, so that tradeoff is unlikely to be a
     * good one.
     *
     * ISA v2.05 introduced IH field with values 0,1,2,6. These all invalidate
     * the same SLB entries (everything but entry 0), but differ in what
     * "lookaside information" is invalidated. TCG can ignore this and flush
     * everything.
     *
     * ISA v3.0 introduced additional values 3,4,7, which change what SLBs are
     * invalidated.
     */

    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;

    starting_entry = 1; /* default for IH=0,1,2,6 */

    if (env->mmu_model == POWERPC_MMU_3_00) {
        switch (ih) {
        case 0x7:
            /* invalidate no SLBs, but all lookaside information */
            return;

        case 0x3:
        case 0x4:
            /* also considers SLB entry 0 */
            starting_entry = 0;
            break;

        default:
            /* treat undefined values as ih==0, and warn */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "slbia undefined IH field %u.\n", ih);
            break;
        }
    }

    for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (!(slb->esid & SLB_ESID_V)) {
            continue;
        }
        if (env->mmu_model == POWERPC_MMU_3_00) {
            if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) {
                /* preserves entries with a class value of 0 */
                continue;
            }
        }

        slb->esid &= ~SLB_ESID_V;
    }
}
static void __helper_slbie(CPUPPCState *env, target_ulong addr,
                           bool global)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /*
         * XXX: given the fact that segment size is 256 MB or 1TB,
         * and we still don't have a tlb_flush_mask(env, n, mask)
         * in QEMU, we just invalidate all TLBs
         */
        env->tlb_need_flush |=
            (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
    }
}
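/*
 * slbie and slbieg share the same invalidation logic; they differ only in
 * whether the resulting TLB flush is local or global.
 */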
void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, false);
}
void helper_slbieg(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, true);
}
int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb = &env->slb[slot];
    const PPCHash64SegmentPageSizes *sps = NULL;
    int i;

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1; /* Bad slot number */
    }
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
            " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}
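/*
 * ppc_store_slb() rejects a store when the slot is out of range, reserved
 * ESID bits are set, the segment size encoding is unsupported, or the LLP
 * bits do not match any page size known for this CPU; the slbmte helper
 * below turns the -1 result into an illegal-instruction program interrupt.
 */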
static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}
static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}
static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;

    if (!msr_is_64bit(env, env->msr)) {
        rb &= 0xffffffff;
    }
    slb = slb_lookup(cpu, rb);
    if (slb == NULL) {
        *rt = (target_ulong)-1ul;
    } else {
        *rt = slb->vsid;
    }
    return 0;
}
void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = env_archcpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
}
target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}
target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}
target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}
/* Check No-Execute or Guarded Storage */
static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
                                              ppc_hash_pte64_t pte)
{
    /* Exec permissions CANNOT take away read or write permissions */
    return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
            PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}
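/*
 * The protection helpers below each return a PAGE_READ/WRITE/EXEC mask;
 * the fault path ANDs them together, so any single check can veto an
 * access without granting rights that another check removed.
 */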
/* Check Basic Storage Protection */
static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;
    /*
     * Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases
     */
    int prot = 0;

    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ | PAGE_EXEC;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ | PAGE_EXEC;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        }
    }

    return prot;
}
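/*
 * The key above is taken from the SLB entry (Ks for supervisor state, Kp
 * for problem state) and combined with the PP bits from the PTE; only the
 * combinations handled in the switches grant access, everything else
 * falls through as no access.
 */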
/* Check the instruction access permissions specified in the IAMR */
static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
{
    CPUPPCState *env = &cpu->env;
    int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;

    /*
     * An instruction fetch is permitted if the IAMR bit is 0.
     * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
     * can only take away EXEC permissions not READ or WRITE permissions.
     * If bit is cleared return PAGE_READ | PAGE_WRITE | PAGE_EXEC since
     * EXEC permissions are allowed.
     */
    return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
                               PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}
static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    switch (env->mmu_model) {
    /*
     * MMU version 2.07 and later support IAMR
     * Check if the IAMR allows the instruction access - it will return
     * PAGE_EXEC if it doesn't (and thus that bit will be cleared) or 0
     * if it does (and prot will be unchanged indicating execution support).
     */
    case POWERPC_MMU_2_07:
    case POWERPC_MMU_3_00:
        prot &= ppc_hash64_iamr_prot(cpu, key);
        break;
    default:
        break;
    }

    return prot;
}
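/*
 * AMR and IAMR hold a two-bit field per virtual page class key; the shift
 * 2 * (31 - key) extracts the pair for this PTE's key.  In the AMR the 0x2
 * bit of the pair denies stores and the 0x1 bit denies loads; in the IAMR
 * the 0x1 bit denies instruction fetch.
 */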
const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
                                             hwaddr ptex, int n)
{
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
    hwaddr base;
    hwaddr plen = n * HASH_PTE_SIZE_64;
    const ppc_hash_pte64_t *hptes;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        return vhc->map_hptes(cpu->vhyp, ptex, n);
    }
    base = ppc_hash64_hpt_base(cpu);

    if (!base) {
        return NULL;
    }

    hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
                              MEMTXATTRS_UNSPECIFIED);
    if (plen < (n * HASH_PTE_SIZE_64)) {
        hw_error("%s: Unable to map all requested HPTEs\n", __func__);
    }
    return hptes;
}
void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
                            hwaddr ptex, int n)
{
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
        return;
    }

    address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
                        false, n * HASH_PTE_SIZE_64);
}
static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64PageSize *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}
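/*
 * For large pages the actual size is recovered from the low RPN bits of
 * pte1: with the POWER7 table at the bottom of this file, for example, a
 * 64K page in a 64K segment is encoded as LP value 0x1, and a 16M page in
 * a 4K segment as 0x38.
 */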
static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
{
    /* Insert B into pte0 */
    *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
            ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
             (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));

    /* Remove B from pte1 */
    *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
}
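/*
 * ISA v3.0 moved the segment size (B) field from the first to the second
 * doubleword of the HPTE; converting back to the pre-3.0 layout here lets
 * the rest of this file deal with a single HPTE format.
 */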
static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     const PPCHash64SegmentPageSizes *sps,
                                     target_ulong ptem,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    int i;
    const ppc_hash_pte64_t *pteg;
    target_ulong pte0, pte1;
    target_ulong ptex;

    ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
    pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
    if (!pteg) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_hpte0(cpu, pteg, i);
        /*
         * pte0 contains the valid bit and must be read before pte1,
         * otherwise we might see an old pte1 with a new valid bit and
         * thus an inconsistent hpte value
         */
        smp_rmb();
        pte1 = ppc_hash64_hpte1(cpu, pteg, i);

        /* Convert format if necessary */
        if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
            ppc64_v3_new_to_old_hpte(&pte0, &pte1);
        }

        /* This compares V, B, H (secondary) and the AVPN */
        if (HPTE64_V_COMPARE(pte0, ptem)) {
            *pshift = hpte_page_shift(sps, pte0, pte1);
            /*
             * If there is no match, ignore the PTE, it could simply
             * be for a different segment size encoding and the
             * architecture specifies we should not match. Linux will
             * potentially leave behind PTEs for the wrong base page
             * size when demoting segments.
             */
            if (*pshift == 0) {
                continue;
            }
            /*
             * We don't do anything with pshift yet as qemu TLB only
             * deals with 4K pages anyway
             */
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
            return ptex + i;
        }
    }
    ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}
static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    CPUPPCState *env = &cpu->env;
    hwaddr hash, ptex;
    uint64_t vsid, epnmask, epn, ptem;
    const PPCHash64SegmentPageSizes *sps = slb->sps;

    /*
     * The SLB store path should prevent any bad page size encodings
     * getting in there, so:
     */
    assert(sps);

    /* If ISL is set in LPCR we need to clamp the page size to 4K */
    if (env->spr[SPR_LPCR] & LPCR_ISL) {
        /* We assume that when using TCG, 4k is first entry of SPS */
        sps = &cpu->hash64_opts->sps[0];
        assert(sps->page_shift == 12);
    }

    epnmask = ~((1ULL << sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> sps->page_shift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
    ptem |= HPTE64_V_VALID;

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
                  "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
                  " hash " TARGET_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
                  "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                  " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                  " hash=" TARGET_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
                  vsid, ptem, hash);
    ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);

    if (ptex == -1) {
        /* Secondary PTEG lookup */
        ptem |= HPTE64_V_SECONDARY;
        qemu_log_mask(CPU_LOG_MMU,
                      "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                      " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                      " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
                      ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);

        ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
    }

    return ptex;
}
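/*
 * Hash function, as implemented above: for a 256M segment the primary hash
 * is vsid ^ (epn >> page_shift); for a 1T segment it is
 * vsid ^ (vsid << 25) ^ (epn >> page_shift).  The secondary PTEG is looked
 * up at the ones-complement of the primary hash, with HPTE64_V_SECONDARY
 * set in the match value.
 */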
unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        return 12;
    }

    /*
     * The encodings in env->sps need to be carefully chosen so that
     * this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            return shift;
        }
    }

    return 0;
}
static bool ppc_hash64_use_vrma(CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_3_00:
        /*
         * ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR
         * register no longer exist
         */
        return true;

    default:
        return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
    }
}
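/*
 * The two helpers below pick between the hypervisor-level (HISI/HDSI) and
 * supervisor-level (ISI/DSI) interrupt, depending on whether the faulting
 * access is virtualised (VPM1 with translation on, VRMA rules with it off)
 * and on whether the CPU is already in hypervisor state.
 */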
static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (msr_ir) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        vpm = ppc_hash64_use_vrma(env);
    }
    if (vpm && !msr_hv) {
        cs->exception_index = POWERPC_EXCP_HISI;
    } else {
        cs->exception_index = POWERPC_EXCP_ISI;
    }
    env->error_code = error_code;
}
static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (msr_dr) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        vpm = ppc_hash64_use_vrma(env);
    }
    if (vpm && !msr_hv) {
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDAR] = dar;
        env->spr[SPR_HDSISR] = dsisr;
    } else {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DAR] = dar;
        env->spr[SPR_DSISR] = dsisr;
    }
    env->error_code = 0;
}
static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 16;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->hpte_set_r(cpu->vhyp, ptex, pte1);
        return;
    }
    base = ppc_hash64_hpt_base(cpu);

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
}
static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
    hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->hpte_set_c(cpu->vhyp, ptex, pte1);
        return;
    }
    base = ppc_hash64_hpt_base(cpu);

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
}
static target_ulong rmls_limit(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    /*
     * In theory the meanings of RMLS values are implementation
     * dependent.  In practice, this seems to have been the set from
     * POWER4+..POWER8, and RMLS is no longer supported in POWER9.
     *
     * Unsupported values mean the OS has shot itself in the
     * foot. Return a 0-sized RMA in this case, which we expect
     * to trigger an immediate DSI or ISI
     */
    static const target_ulong rma_sizes[16] = {
        [0] = 256 * GiB,
        [1] = 16 * GiB,
        [2] = 1 * GiB,
        [3] = 64 * MiB,
        [4] = 256 * MiB,
        [7] = 128 * MiB,
        [8] = 32 * MiB,
    };
    target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;

    return rma_sizes[rmls];
}
static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
{
    CPUPPCState *env = &cpu->env;
    target_ulong lpcr = env->spr[SPR_LPCR];
    uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
    target_ulong vsid = SLB_VSID_VRMA | ((vrmasd << 4) & SLB_VSID_LLP_MASK);
    int i;

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];

        if (!sps->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
            slb->esid = SLB_ESID_V;
            slb->vsid = vsid;
            slb->sps = sps;
            return 0;
        }
    }

    error_report("Bad page size encoding in LPCR[VRMASD]; LPCR=0x"
                 TARGET_FMT_lx, lpcr);

    return -1;
}
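/*
 * ppc_hash64_handle_mmu_fault() below performs a full translation for a
 * TCG TLB miss: real-mode handling (vhyp / HRMOR / VRMA / RMLS), then SLB
 * lookup, segment no-execute check, hash table search, permission checks
 * (PP, AMR/IAMR, N/G), reference and change bit updates, and finally
 * tlb_set_page() with the resulting real address.
 */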
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t vrma_slbe;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr ptex;
    ppc_hash_pte64_t pte;
    int exec_prot, pp_prot, amr_prot, prot;
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /*
     * Note on LPCR usage: 970 uses HID4, but our special variant of
     * store_spr copies relevant fields into env->spr[SPR_LPCR].
     * Similarly we filter unimplemented bits when storing into LPCR
     * depending on the MMU version. This code can thus just use the
     * LPCR "as-is".
     */

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /*
         * Translation is supposedly "off", but in real mode the top 4
         * effective address bits are (mostly) ignored
         */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        if (cpu->vhyp) {
            /*
             * In virtual hypervisor mode, there's nothing to do:
             *   EA == GPA == qemu guest address
             */
        } else if (msr_hv || !env->has_hv_mode) {
            /* In HV mode, add HRMOR if top EA bit is clear */
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
            }
        } else if (ppc_hash64_use_vrma(env)) {
            /* Emulated VRMA mode */
            slb = &vrma_slbe;
            if (build_vrma_slbe(cpu, slb) != 0) {
                /* Invalid VRMA setup, machine check */
                cs->exception_index = POWERPC_EXCP_MCHECK;
                env->error_code = 0;
                return 1;
            }

            goto skip_slb_search;
        } else {
            target_ulong limit = rmls_limit(cpu);

            /* Emulated old-style RMO mode, bounds check against RMLS */
            if (raddr >= limit) {
                if (rwx == 2) {
                    ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
                } else {
                    int dsisr = DSISR_PROTFAULT;
                    if (rwx == 1) {
                        dsisr |= DSISR_ISSTORE;
                    }
                    ppc_hash64_set_dsi(cs, eaddr, dsisr);
                }
                return 1;
            }

            raddr |= env->spr[SPR_RMOR];
        }
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);
    if (!slb) {
        /* No entry found, check if in-memory segment tables are in use */
        if (ppc64_use_proc_tbl(cpu)) {
            /* TODO - Unsupported */
            error_report("Segment Table Support Unimplemented");
            exit(1);
        }
        /* Segment still not found, generate the appropriate interrupt */
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

skip_slb_search:

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        ppc_hash64_set_isi(cs, SRR1_NOEXEC_GUARD);
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
    if (ptex == -1) {
        if (rwx == 2) {
            ppc_hash64_set_isi(cs, SRR1_NOPTE);
        } else {
            int dsisr = DSISR_NOPTE;
            if (rwx == 1) {
                dsisr |= DSISR_ISSTORE;
            }
            ppc_hash64_set_dsi(cs, eaddr, dsisr);
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at index %08" HWADDR_PRIx "\n", ptex);

    /* 5. Check access permissions */

    exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = exec_prot & pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            int srr1 = 0;
            if (PAGE_EXEC & ~exec_prot) {
                srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
            } else if (PAGE_EXEC & ~pp_prot) {
                srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
            }
            if (PAGE_EXEC & ~amr_prot) {
                srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
            }
            ppc_hash64_set_isi(cs, srr1);
        } else {
            int dsisr = 0;
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= DSISR_PROTFAULT;
            }
            if (rwx == 1) {
                dsisr |= DSISR_ISSTORE;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= DSISR_AMR;
            }
            ppc_hash64_set_dsi(cs, eaddr, dsisr);
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    if (!(pte.pte1 & HPTE64_R_R)) {
        ppc_hash64_set_r(cpu, ptex, pte.pte1);
    }
    if (!(pte.pte1 & HPTE64_R_C)) {
        if (rwx == 1) {
            ppc_hash64_set_c(cpu, ptex, pte.pte1);
        } else {
            /*
             * Treat the page as read-only for now, so that a later write
             * will pass through this function again to set the C bit
             */
            prot &= ~PAGE_WRITE;
        }
    }

    /* 7. Determine the real address from the PTE */

    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1ULL << apshift);

    return 0;
}
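/*
 * The debug accessor below follows the same translation steps but never
 * raises interrupts or touches the R/C bits; it simply returns -1 when the
 * address cannot be translated.
 */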
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t vrma_slbe;
    ppc_slb_t *slb;
    hwaddr ptex, raddr;
    ppc_hash_pte64_t pte;
    unsigned apshift;

    /* Handle real mode */
    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        raddr = addr & 0x0FFFFFFFFFFFFFFFULL;

        if (cpu->vhyp) {
            /*
             * In virtual hypervisor mode, there's nothing to do:
             *   EA == GPA == qemu guest address
             */
            return raddr;
        } else if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
            /* In HV mode, add HRMOR if top EA bit is clear */
            return raddr | env->spr[SPR_HRMOR];
        } else if (ppc_hash64_use_vrma(env)) {
            /* Emulated VRMA mode */
            slb = &vrma_slbe;
            if (build_vrma_slbe(cpu, slb) != 0) {
                return -1;
            }
        } else {
            target_ulong limit = rmls_limit(cpu);

            /* Emulated old-style RMO mode, bounds check against RMLS */
            if (raddr >= limit) {
                return -1;
            }
            return raddr | env->spr[SPR_RMOR];
        }
    } else {
        slb = slb_lookup(cpu, addr);
        if (!slb) {
            return -1;
        }
    }

    ptex = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
    if (ptex == -1) {
        return -1;
    }

    return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
        & TARGET_PAGE_MASK;
}
void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}
void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    CPUPPCState *env = &cpu->env;

    env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
    /* The gtse bit affects hflags */
    hreg_compute_hflags(env);
}
void helper_store_lpcr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);

    ppc_store_lpcr(cpu, val);
}
void ppc_hash64_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!pcc->hash64_opts) {
        assert(!mmu_is_64bit(env->mmu_model));
        return;
    }

    cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
}
void ppc_hash64_finalize(PowerPCCPU *cpu)
{
    g_free(cpu->hash64_opts);
}
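/*
 * The option tables below describe, per CPU family, which segment page
 * sizes exist (segment page_shift plus its SLB LLP encoding) and which
 * actual page sizes can be used within each segment (page_shift plus the
 * corresponding PTE LP encoding).
 */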
const PPCHash64Options ppc_hash64_opts_basic = {
    .flags = 0,
    .slb_size = 64,
    .sps = {
        { .page_shift = 12, /* 4K */
          .slb_enc = 0,
          .enc = { { .page_shift = 12, .pte_enc = 0 } }
        },
        { .page_shift = 24, /* 16M */
          .slb_enc = 0x100,
          .enc = { { .page_shift = 24, .pte_enc = 0 } }
        },
    },
};
const PPCHash64Options ppc_hash64_opts_POWER7 = {
    .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
    .slb_size = 32,
    .sps = {
        {
            .page_shift = 12, /* 4K */
            .slb_enc = 0,
            .enc = { { .page_shift = 12, .pte_enc = 0 },
                     { .page_shift = 16, .pte_enc = 0x7 },
                     { .page_shift = 24, .pte_enc = 0x38 }, },
        },
        {
            .page_shift = 16, /* 64K */
            .slb_enc = SLB_VSID_64K,
            .enc = { { .page_shift = 16, .pte_enc = 0x1 },
                     { .page_shift = 24, .pte_enc = 0x8 }, },
        },
        {
            .page_shift = 24, /* 16M */
            .slb_enc = SLB_VSID_16M,
            .enc = { { .page_shift = 24, .pte_enc = 0 }, },
        },
        {
            .page_shift = 34, /* 16G */
            .slb_enc = SLB_VSID_16G,
            .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
        },
    }
};
void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu,
                                 bool (*cb)(void *, uint32_t, uint32_t),
                                 void *opaque)
{
    PPCHash64Options *opts = cpu->hash64_opts;
    int i;
    int n = 0;
    bool ci_largepage = false;

    assert(opts);

    for (i = 0; i < ARRAY_SIZE(opts->sps); i++) {
        PPCHash64SegmentPageSizes *sps = &opts->sps[i];
        int j;
        int m = 0;

        assert(n <= i);

        if (!sps->page_shift) {
            break;
        }

        for (j = 0; j < ARRAY_SIZE(sps->enc); j++) {
            PPCHash64PageSize *ps = &sps->enc[j];

            assert(m <= j);
            if (!ps->page_shift) {
                break;
            }

            if (cb(opaque, sps->page_shift, ps->page_shift)) {
                if (ps->page_shift >= 16) {
                    ci_largepage = true;
                }
                sps->enc[m++] = *ps;
            }
        }

        /* Clear rest of the row */
        for (j = m; j < ARRAY_SIZE(sps->enc); j++) {
            memset(&sps->enc[j], 0, sizeof(sps->enc[j]));
        }

        if (m) {
            n++;
        }
    }

    /* Clear the rest of the table */
    for (i = n; i < ARRAY_SIZE(opts->sps); i++) {
        memset(&opts->sps[i], 0, sizeof(opts->sps[i]));
    }

    if (!ci_largepage) {
        opts->flags &= ~PPC_HASH64_CI_LARGEPAGE;
    }
}