/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"
#include "hw/hw.h"
#include "mmu-book3s-v3.h"

/* #define DEBUG_SLB */

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

/*
 * SLB handling
 */

static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                    PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}

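/*
 * A worked example of the matching above (illustrative numbers, not
 * taken from any real setup): with SEGMENT_MASK_256M clearing the low
 * 28 bits and SEGMENT_MASK_1T clearing the low 40 bits, an effective
 * address of 0x0000123456789abc yields the two candidates
 *
 *     esid_256M = 0x0000123450000000 | SLB_ESID_V
 *     esid_1T   = 0x0000120000000000 | SLB_ESID_V
 *
 * and an SLB entry only hits if its esid equals the candidate for the
 * segment size selected by its SLB_VSID_B field.
 */
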
void dump_slb(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    qemu_printf("SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

void helper_slbia(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    int n;

    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /* XXX: given the fact that segment size is 256 MB or 1TB,
             * and we still don't have a tlb_flush_mask(env, n, mask)
             * in QEMU, we just invalidate all TLBs
             */
            env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        }
    }
}

static void __helper_slbie(CPUPPCState *env, target_ulong addr,
                           bool global)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /* XXX: given the fact that segment size is 256 MB or 1TB,
         * and we still don't have a tlb_flush_mask(env, n, mask)
         * in QEMU, we just invalidate all TLBs
         */
        env->tlb_need_flush |=
            (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
    }
}

void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, false);
}

void helper_slbieg(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, true);
}

int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb = &env->slb[slot];
    const PPCHash64SegmentPageSizes *sps = NULL;
    int i;

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1; /* Bad slot number */
    }
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
            " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}

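/*
 * An illustration of the LLP check above (encodings from the POWER7
 * table at the bottom of this file): a vsid whose SLB_VSID_L/LP bits
 * equal SLB_VSID_64K selects the 64K PPCHash64SegmentPageSizes entry,
 * making 64K the segment's base page size, while a vsid with those
 * bits clear selects the 4K entry. A vsid carrying an encoding absent
 * from the table is rejected with -1.
 */
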
static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;

    if (!msr_is_64bit(env, env->msr)) {
        rb &= 0xffffffff;
    }
    slb = slb_lookup(cpu, rb);
    if (slb == NULL) {
        *rt = (target_ulong)-1ul;
    } else {
        *rt = slb->vsid;
    }
    return 0;
}

void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
}

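/*
 * The operand split above follows the slbmte operand layout as this
 * file uses it: the low 12 bits of RB index the SLB slot, the
 * remaining high bits carry the ESID and valid bit, and RS supplies
 * the VSID plus the segment flags validated by ppc_store_slb().
 */
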
target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

/* Check No-Execute or Guarded Storage */
static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
                                              ppc_hash_pte64_t pte)
{
    /* Exec permissions CANNOT take away read or write permissions */
    return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
            PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

/* Check Basic Storage Protection */
static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;
    /* Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases */
    int prot = 0;

    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ | PAGE_EXEC;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ | PAGE_EXEC;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        }
    }

    return prot;
}

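/*
 * Summarising the switch above as a table (derived from the code
 * itself rather than quoted from the ISA document):
 *
 *     key  pp       prot
 *      0   0,1,2    PAGE_READ | PAGE_WRITE | PAGE_EXEC
 *      0   3,6      PAGE_READ | PAGE_EXEC
 *      1   0,6      none
 *      1   1,3      PAGE_READ | PAGE_EXEC
 *      1   2        PAGE_READ | PAGE_WRITE | PAGE_EXEC
 *
 * Any other pp/key combination keeps the default of no access.
 */
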
/* Check the instruction access permissions specified in the IAMR */
static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
{
    CPUPPCState *env = &cpu->env;
    int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;

    /*
     * An instruction fetch is permitted if the IAMR bit is 0.
     * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
     * can only take away EXEC permissions not READ or WRITE permissions.
     * If bit is cleared return PAGE_READ | PAGE_WRITE | PAGE_EXEC since
     * EXEC permissions are allowed.
     */
    return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
                               PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    switch (env->mmu_model) {
    /*
     * MMU version 2.07 and later support IAMR
     * Check if the IAMR allows the instruction access - it will return
     * PAGE_EXEC if it doesn't (and thus that bit will be cleared) or 0
     * if it does (and prot will be unchanged indicating execution support).
     */
    case POWERPC_MMU_2_07:
    case POWERPC_MMU_3_00:
        prot &= ppc_hash64_iamr_prot(cpu, key);
        break;
    default:
        break;
    }

    return prot;
}

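/*
 * A worked example of the AMR indexing above (illustrative values):
 * for a PTE tagged with key 30, amrbits = (AMR >> 2 * (31 - 30)) & 0x3
 * picks bits 3:2 of the AMR, so an AMR of 0x8 gives amrbits = 0x2,
 * which strips PAGE_WRITE while leaving PAGE_READ and PAGE_EXEC alone.
 */
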
const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
                                             hwaddr ptex, int n)
{
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
    hwaddr base;
    hwaddr plen = n * HASH_PTE_SIZE_64;
    const ppc_hash_pte64_t *hptes;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        return vhc->map_hptes(cpu->vhyp, ptex, n);
    }
    base = ppc_hash64_hpt_base(cpu);

    if (!base) {
        return NULL;
    }

    hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
                              MEMTXATTRS_UNSPECIFIED);
    if (plen < (n * HASH_PTE_SIZE_64)) {
        hw_error("%s: Unable to map all requested HPTEs\n", __func__);
    }
    return hptes;
}

void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
                            hwaddr ptex, int n)
{
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
        return;
    }

    address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
                        false, n * HASH_PTE_SIZE_64);
}

static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64PageSize *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}

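/*
 * A worked decode using the POWER7 table at the bottom of this file
 * (and assuming HPTE64_R_RPN_SHIFT is 12): for the 64K base page size,
 * ps->page_shift = 16 and ps->pte_enc = 0x1, so mask covers pte1 bits
 * 15:12 and the loop above reports 16 exactly when
 * (pte1 & 0xf000) == 0x1000.
 */
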
static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
{
    /* Insert B into pte0 */
    *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
            ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
             (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));

    /* Remove B from pte1 */
    *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
}

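/*
 * In other words: ISA v3.00 moved the segment size field B from the
 * first doubleword of the HPTE into the second. The helper above
 * rebuilds the pre-3.00 layout the rest of this file works with, by
 * shifting B up by HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT
 * bits into pte0 and clearing it from pte1.
 */
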
static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     const PPCHash64SegmentPageSizes *sps,
                                     target_ulong ptem,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    int i;
    const ppc_hash_pte64_t *pteg;
    target_ulong pte0, pte1;
    hwaddr ptex;

    ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
    pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
    if (!pteg) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_hpte0(cpu, pteg, i);
        /*
         * pte0 contains the valid bit and must be read before pte1,
         * otherwise we might see an old pte1 with a new valid bit and
         * thus an inconsistent hpte value
         */
        smp_rmb();
        pte1 = ppc_hash64_hpte1(cpu, pteg, i);

        /* Convert format if necessary */
        if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
            ppc64_v3_new_to_old_hpte(&pte0, &pte1);
        }

        /* This compares V, B, H (secondary) and the AVPN */
        if (HPTE64_V_COMPARE(pte0, ptem)) {
            *pshift = hpte_page_shift(sps, pte0, pte1);
            /*
             * If there is no match, ignore the PTE, it could simply
             * be for a different segment size encoding and the
             * architecture specifies we should not match. Linux will
             * potentially leave behind PTEs for the wrong base page
             * size when demoting segments.
             */
            if (*pshift == 0) {
                continue;
            }
            /* We don't do anything with pshift yet as qemu TLB only deals
             * with 4K pages anyway
             */
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
            return ptex + i;
        }
    }
    ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    CPUPPCState *env = &cpu->env;
    hwaddr hash, ptex;
    uint64_t vsid, epnmask, epn, ptem;
    const PPCHash64SegmentPageSizes *sps = slb->sps;

    /* The SLB store path should prevent any bad page size encodings
     * getting in there, so: */
    assert(sps);

    /* If ISL is set in LPCR we need to clamp the page size to 4K */
    if (env->spr[SPR_LPCR] & LPCR_ISL) {
        /* We assume that when using TCG, 4k is first entry of SPS */
        sps = &cpu->hash64_opts->sps[0];
        assert(sps->page_shift == 12);
    }

    epnmask = ~((1ULL << sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> sps->page_shift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
    ptem |= HPTE64_V_VALID;

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
                  "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
                  " hash " TARGET_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
                  "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                  " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                  " hash=" TARGET_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
                  vsid, ptem, hash);
    ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);

    if (ptex == -1) {
        /* Secondary PTEG lookup */
        ptem |= HPTE64_V_SECONDARY;
        qemu_log_mask(CPU_LOG_MMU,
                      "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                      " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                      " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
                      ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);

        ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
    }

    return ptex;
}

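/*
 * A worked example of the primary hash above (illustrative values):
 * in a 256M segment with vsid = 0x123456 and a 4K base page size, an
 * access at segment offset 0x5000 has epn = 0x5000, so
 *
 *     hash = 0x123456 ^ (0x5000 >> 12) = 0x123456 ^ 0x5 = 0x123453
 *
 * and the secondary PTEG, probed only on a primary miss, uses the
 * one's complement, ~hash.
 */
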
unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        return 12;
    }

    /*
     * The encodings in env->sps need to be carefully chosen so that
     * this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            return shift;
        }
    }

    return 0;
}

static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (msr_ir) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        switch (env->mmu_model) {
        case POWERPC_MMU_3_00:
            /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
            vpm = true;
            break;
        default:
            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
            break;
        }
    }
    if (vpm && !msr_hv) {
        cs->exception_index = POWERPC_EXCP_HISI;
    } else {
        cs->exception_index = POWERPC_EXCP_ISI;
    }
    env->error_code = error_code;
}

static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (msr_dr) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        switch (env->mmu_model) {
        case POWERPC_MMU_3_00:
            /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
            vpm = true;
            break;
        default:
            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
            break;
        }
    }
    if (vpm && !msr_hv) {
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDAR] = dar;
        env->spr[SPR_HDSISR] = dsisr;
    } else {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DAR] = dar;
        env->spr[SPR_DSISR] = dsisr;
    }
    env->error_code = 0;
}

int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr ptex;
    ppc_hash_pte64_t pte;
    int exec_prot, pp_prot, amr_prot, prot;
    uint64_t new_pte1;
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* Note on LPCR usage: 970 uses HID4, but our special variant
     * of store_spr copies relevant fields into env->spr[SPR_LPCR].
     * Similarly we filter unimplemented bits when storing into
     * LPCR depending on the MMU version. This code can thus just
     * use the LPCR "as-is".
     */

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is supposedly "off" */
        /* In real mode the top 4 effective address bits are (mostly) ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (msr_hv || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
            }
        } else {
            /* Otherwise, check VPM for RMA vs VRMA */
            if (env->spr[SPR_LPCR] & LPCR_VPM0) {
                slb = &env->vrma_slb;
                if (slb->sps) {
                    goto skip_slb_search;
                }
                /* Not much else to do here */
                cs->exception_index = POWERPC_EXCP_MCHECK;
                env->error_code = 0;
                return 1;
            } else if (raddr < env->rmls) {
                /* RMA. Check bounds in RMLS */
                raddr |= env->spr[SPR_RMOR];
            } else {
                /* The access failed, generate the appropriate interrupt */
                if (rwx == 2) {
                    ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
                } else {
                    int dsisr = DSISR_PROTFAULT;
                    if (rwx == 1) {
                        dsisr |= DSISR_ISSTORE;
                    }
                    ppc_hash64_set_dsi(cs, eaddr, dsisr);
                }
                return 1;
            }
        }
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);
    if (!slb) {
        /* No entry found, check if in-memory segment tables are in use */
        if (ppc64_use_proc_tbl(cpu)) {
            /* TODO - Unsupported */
            error_report("Segment Table Support Unimplemented");
            exit(1);
        }
        /* Segment still not found, generate the appropriate interrupt */
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

skip_slb_search:

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        ppc_hash64_set_isi(cs, SRR1_NOEXEC_GUARD);
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
    if (ptex == -1) {
        if (rwx == 2) {
            ppc_hash64_set_isi(cs, SRR1_NOPTE);
        } else {
            int dsisr = DSISR_NOPTE;
            if (rwx == 1) {
                dsisr |= DSISR_ISSTORE;
            }
            ppc_hash64_set_dsi(cs, eaddr, dsisr);
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at index %08" HWADDR_PRIx "\n", ptex);

    /* 5. Check access permissions */

    exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = exec_prot & pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            int srr1 = 0;
            if (PAGE_EXEC & ~exec_prot) {
                srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
            } else if (PAGE_EXEC & ~pp_prot) {
                srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
            }
            if (PAGE_EXEC & ~amr_prot) {
                srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
            }
            ppc_hash64_set_isi(cs, srr1);
        } else {
            int dsisr = 0;
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= DSISR_PROTFAULT;
            }
            if (rwx == 1) {
                dsisr |= DSISR_ISSTORE;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= DSISR_AMR;
            }
            ppc_hash64_set_dsi(cs, eaddr, dsisr);
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }

    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte(cpu, ptex, pte.pte0, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1ULL << apshift);

    return 0;
}

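/*
 * Note on step 6 above: C-bit tracking relies on TLB refill semantics.
 * A first read-only access maps the page with PAGE_WRITE removed, so a
 * later store faults back into this function with rwx == 1, which sets
 * HPTE64_R_C and remaps the page writable - no separate dirty-tracking
 * machinery is needed beyond re-running the translation.
 */
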
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    hwaddr ptex, raddr;
    ppc_hash_pte64_t pte;
    unsigned apshift;

    /* Handle real mode */
    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        raddr = addr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
            return raddr | env->spr[SPR_HRMOR];
        }

        /* Otherwise, check VPM for RMA vs VRMA */
        if (env->spr[SPR_LPCR] & LPCR_VPM0) {
            slb = &env->vrma_slb;
            if (!slb->sps) {
                return -1;
            }
        } else if (raddr < env->rmls) {
            /* RMA. Check bounds in RMLS */
            return raddr | env->spr[SPR_RMOR];
        } else {
            return -1;
        }
    } else {
        slb = slb_lookup(cpu, addr);
        if (!slb) {
            return -1;
        }
    }

    ptex = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
    if (ptex == -1) {
        return -1;
    }

    return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
        & TARGET_PAGE_MASK;
}

void ppc_hash64_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
                           uint64_t pte0, uint64_t pte1)
{
    hwaddr base;
    hwaddr offset = ptex * HASH_PTE_SIZE_64;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->store_hpte(cpu->vhyp, ptex, pte0, pte1);
        return;
    }
    base = ppc_hash64_hpt_base(cpu);

    stq_phys(CPU(cpu)->as, base + offset, pte0);
    stq_phys(CPU(cpu)->as, base + offset + HASH_PTE_SIZE_64 / 2, pte1);
}

void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}

static void ppc_hash64_update_rmls(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpcr = env->spr[SPR_LPCR];

    /*
     * This is the full 4 bits encoding of POWER8. Previous
     * CPUs only support a subset of these but the filtering
     * is done when writing LPCR
     */
    switch ((lpcr & LPCR_RMLS) >> LPCR_RMLS_SHIFT) {
    case 0x8: /* 32MB */
        env->rmls = 0x2000000ull;
        break;
    case 0x3: /* 64MB */
        env->rmls = 0x4000000ull;
        break;
    case 0x7: /* 128MB */
        env->rmls = 0x8000000ull;
        break;
    case 0x4: /* 256MB */
        env->rmls = 0x10000000ull;
        break;
    case 0x2: /* 1GB */
        env->rmls = 0x40000000ull;
        break;
    case 0x1: /* 16GB */
        env->rmls = 0x400000000ull;
        break;
    default:
        /* What to do here ??? */
        env->rmls = 0;
    }
}

static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    const PPCHash64SegmentPageSizes *sps = NULL;
    target_ulong esid, vsid, lpcr;
    ppc_slb_t *slb = &env->vrma_slb;
    uint32_t vrmasd;
    int i;

    /* First clear it */
    slb->esid = slb->vsid = 0;
    slb->sps = NULL;

    /* Is VRMA enabled ? */
    lpcr = env->spr[SPR_LPCR];
    if (!(lpcr & LPCR_VPM0)) {
        return;
    }

    /* Make one up. Mostly ignore the ESID which will not be
     * needed for translation
     */
    vsid = SLB_VSID_VRMA;
    vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
    vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
    esid = SLB_ESID_V;

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
                     " vsid 0x"TARGET_FMT_lx, esid, vsid);
        return;
    }

    slb->vsid = vsid;
    slb->esid = esid;
    slb->sps = sps;
}

void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpcr = 0;

    /* Filter out bits */
    switch (env->mmu_model) {
    case POWERPC_MMU_64B: /* 970 */
        if (val & 0x40) {
            lpcr |= LPCR_LPES0;
        }
        if (val & 0x8000000000000000ull) {
            lpcr |= LPCR_LPES1;
        }
        if (val & 0x20) {
            lpcr |= (0x4ull << LPCR_RMLS_SHIFT);
        }
        if (val & 0x4000000000000000ull) {
            lpcr |= (0x2ull << LPCR_RMLS_SHIFT);
        }
        if (val & 0x2000000000000000ull) {
            lpcr |= (0x1ull << LPCR_RMLS_SHIFT);
        }
        env->spr[SPR_RMOR] = ((lpcr >> 41) & 0xffffull) << 26;

        /* XXX We could also write LPID from HID4 here
         * but since we don't tag any translation on it
         * it doesn't actually matter
         */
        /* XXX For proper emulation of 970 we also need
         * to dig HRMOR out of HID5
         */
        break;
    case POWERPC_MMU_2_03: /* P5p */
        lpcr = val & (LPCR_RMLS | LPCR_ILE |
                      LPCR_LPES0 | LPCR_LPES1 |
                      LPCR_RMI | LPCR_HDICE);
        break;
    case POWERPC_MMU_2_06: /* P7 */
        lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_DPFD |
                      LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
                      LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2 |
                      LPCR_MER | LPCR_TC |
                      LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE);
        break;
    case POWERPC_MMU_2_07: /* P8 */
        lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV |
                      LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
                      LPCR_AIL | LPCR_ONL | LPCR_P8_PECE0 | LPCR_P8_PECE1 |
                      LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 |
                      LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE);
        break;
    case POWERPC_MMU_3_00: /* P9 */
        lpcr = val & (LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
                      (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
                      LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD |
                      (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
                      LPCR_DEE | LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC |
                      LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE);
        /*
         * If we have a virtual hypervisor, we need to bring back RMLS. It
         * doesn't exist on an actual P9 but that's all we know how to
         * configure with softmmu at the moment
         */
        if (cpu->vhyp) {
            lpcr |= (val & LPCR_RMLS);
        }
        break;
    default:
        break;
    }
    env->spr[SPR_LPCR] = lpcr;
    ppc_hash64_update_rmls(cpu);
    ppc_hash64_update_vrma(cpu);
}

void helper_store_lpcr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    ppc_store_lpcr(cpu, val);
}

void ppc_hash64_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!pcc->hash64_opts) {
        assert(!(env->mmu_model & POWERPC_MMU_64));
        return;
    }

    cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
}

void ppc_hash64_finalize(PowerPCCPU *cpu)
{
    g_free(cpu->hash64_opts);
}

const PPCHash64Options ppc_hash64_opts_basic = {
    .flags = 0,
    .slb_size = 64,
    .sps = {
        { .page_shift = 12, /* 4K */
          .slb_enc = 0,
          .enc = { { .page_shift = 12, .pte_enc = 0 } }
        },
        { .page_shift = 24, /* 16M */
          .slb_enc = 0x100,
          .enc = { { .page_shift = 24, .pte_enc = 0 } }
        },
    },
};

const PPCHash64Options ppc_hash64_opts_POWER7 = {
    .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
    .slb_size = 32,
    .sps = {
        {
            .page_shift = 12, /* 4K */
            .slb_enc = 0,
            .enc = { { .page_shift = 12, .pte_enc = 0 },
                     { .page_shift = 16, .pte_enc = 0x7 },
                     { .page_shift = 24, .pte_enc = 0x38 }, },
        },
        {
            .page_shift = 16, /* 64K */
            .slb_enc = SLB_VSID_64K,
            .enc = { { .page_shift = 16, .pte_enc = 0x1 },
                     { .page_shift = 24, .pte_enc = 0x8 }, },
        },
        {
            .page_shift = 24, /* 16M */
            .slb_enc = SLB_VSID_16M,
            .enc = { { .page_shift = 24, .pte_enc = 0 }, },
        },
        {
            .page_shift = 34, /* 16G */
            .slb_enc = SLB_VSID_16G,
            .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
        },
    }
};

void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu,
                                 bool (*cb)(void *, uint32_t, uint32_t),
                                 void *opaque)
{
    PPCHash64Options *opts = cpu->hash64_opts;
    int i;
    int n = 0;
    bool ci_largepage = false;

    assert(opts);

    n = 0;
    for (i = 0; i < ARRAY_SIZE(opts->sps); i++) {
        PPCHash64SegmentPageSizes *sps = &opts->sps[i];
        int j;
        int m = 0;

        assert(n <= i);

        if (!sps->page_shift) {
            break;
        }

        for (j = 0; j < ARRAY_SIZE(sps->enc); j++) {
            PPCHash64PageSize *ps = &sps->enc[j];

            assert(m <= j);
            if (!ps->page_shift) {
                break;
            }

            if (cb(opaque, sps->page_shift, ps->page_shift)) {
                if (ps->page_shift >= 16) {
                    ci_largepage = true;
                }
                sps->enc[m++] = *ps;
            }
        }

        /* Clear rest of the row */
        for (j = m; j < ARRAY_SIZE(sps->enc); j++) {
            memset(&sps->enc[j], 0, sizeof(sps->enc[j]));
        }

        if (m) {
            n++;
        }
    }

    /* Clear the rest of the table */
    for (i = n; i < ARRAY_SIZE(opts->sps); i++) {
        memset(&opts->sps[i], 0, sizeof(opts->sps[i]));
    }

    if (!ci_largepage) {
        opts->flags &= ~PPC_HASH64_CI_LARGEPAGE;
    }
}