/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
22 #include "sysemu/kvm.h"
24 #include "mmu-hash64.h"
/*
 * Debug logging helpers.  Without the conditional guards, each of
 * LOG_MMU / LOG_MMU_STATE / LOG_SLB was defined twice, which is a
 * preprocessor redefinition error; restore the DEBUG_MMU / DEBUG_SLB
 * selection so the logging variants are compiled in only on demand.
 */
#if defined(DEBUG_MMU)
#  define LOG_MMU(...) qemu_log(__VA_ARGS__)
#  define LOG_MMU_STATE(env) log_cpu_state((env), 0)
#else
#  define LOG_MMU(...) do { } while (0)
#  define LOG_MMU_STATE(...) do { } while (0)
#endif

#if defined(DEBUG_SLB)
#  define LOG_SLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif
43 struct mmu_ctx_hash64
{
44 hwaddr raddr
; /* Real address */
45 int prot
; /* Protection bits */
46 int key
; /* Access key */
53 static ppc_slb_t
*slb_lookup(CPUPPCState
*env
, target_ulong eaddr
)
55 uint64_t esid_256M
, esid_1T
;
58 LOG_SLB("%s: eaddr " TARGET_FMT_lx
"\n", __func__
, eaddr
);
60 esid_256M
= (eaddr
& SEGMENT_MASK_256M
) | SLB_ESID_V
;
61 esid_1T
= (eaddr
& SEGMENT_MASK_1T
) | SLB_ESID_V
;
63 for (n
= 0; n
< env
->slb_nr
; n
++) {
64 ppc_slb_t
*slb
= &env
->slb
[n
];
66 LOG_SLB("%s: slot %d %016" PRIx64
" %016"
67 PRIx64
"\n", __func__
, n
, slb
->esid
, slb
->vsid
);
68 /* We check for 1T matches on all MMUs here - if the MMU
69 * doesn't have 1T segment support, we will have prevented 1T
70 * entries from being inserted in the slbmte code. */
71 if (((slb
->esid
== esid_256M
) &&
72 ((slb
->vsid
& SLB_VSID_B
) == SLB_VSID_B_256M
))
73 || ((slb
->esid
== esid_1T
) &&
74 ((slb
->vsid
& SLB_VSID_B
) == SLB_VSID_B_1T
))) {
82 void dump_slb(FILE *f
, fprintf_function cpu_fprintf
, CPUPPCState
*env
)
87 cpu_synchronize_state(env
);
89 cpu_fprintf(f
, "SLB\tESID\t\t\tVSID\n");
90 for (i
= 0; i
< env
->slb_nr
; i
++) {
91 slbe
= env
->slb
[i
].esid
;
92 slbv
= env
->slb
[i
].vsid
;
93 if (slbe
== 0 && slbv
== 0) {
96 cpu_fprintf(f
, "%d\t0x%016" PRIx64
"\t0x%016" PRIx64
"\n",
101 void helper_slbia(CPUPPCState
*env
)
103 int n
, do_invalidate
;
106 /* XXX: Warning: slbia never invalidates the first segment */
107 for (n
= 1; n
< env
->slb_nr
; n
++) {
108 ppc_slb_t
*slb
= &env
->slb
[n
];
110 if (slb
->esid
& SLB_ESID_V
) {
111 slb
->esid
&= ~SLB_ESID_V
;
112 /* XXX: given the fact that segment size is 256 MB or 1TB,
113 * and we still don't have a tlb_flush_mask(env, n, mask)
114 * in QEMU, we just invalidate all TLBs
124 void helper_slbie(CPUPPCState
*env
, target_ulong addr
)
128 slb
= slb_lookup(env
, addr
);
133 if (slb
->esid
& SLB_ESID_V
) {
134 slb
->esid
&= ~SLB_ESID_V
;
136 /* XXX: given the fact that segment size is 256 MB or 1TB,
137 * and we still don't have a tlb_flush_mask(env, n, mask)
138 * in QEMU, we just invalidate all TLBs
144 int ppc_store_slb(CPUPPCState
*env
, target_ulong rb
, target_ulong rs
)
146 int slot
= rb
& 0xfff;
147 ppc_slb_t
*slb
= &env
->slb
[slot
];
149 if (rb
& (0x1000 - env
->slb_nr
)) {
150 return -1; /* Reserved bits set or slot too high */
152 if (rs
& (SLB_VSID_B
& ~SLB_VSID_B_1T
)) {
153 return -1; /* Bad segment size */
155 if ((rs
& SLB_VSID_B
) && !(env
->mmu_model
& POWERPC_MMU_1TSEG
)) {
156 return -1; /* 1T segment on MMU that doesn't support it */
159 /* Mask out the slot number as we store the entry */
160 slb
->esid
= rb
& (SLB_ESID_ESID
| SLB_ESID_V
);
163 LOG_SLB("%s: %d " TARGET_FMT_lx
" - " TARGET_FMT_lx
" => %016" PRIx64
164 " %016" PRIx64
"\n", __func__
, slot
, rb
, rs
,
165 slb
->esid
, slb
->vsid
);
170 static int ppc_load_slb_esid(CPUPPCState
*env
, target_ulong rb
,
173 int slot
= rb
& 0xfff;
174 ppc_slb_t
*slb
= &env
->slb
[slot
];
176 if (slot
>= env
->slb_nr
) {
184 static int ppc_load_slb_vsid(CPUPPCState
*env
, target_ulong rb
,
187 int slot
= rb
& 0xfff;
188 ppc_slb_t
*slb
= &env
->slb
[slot
];
190 if (slot
>= env
->slb_nr
) {
198 void helper_store_slb(CPUPPCState
*env
, target_ulong rb
, target_ulong rs
)
200 if (ppc_store_slb(env
, rb
, rs
) < 0) {
201 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
206 target_ulong
helper_load_slb_esid(CPUPPCState
*env
, target_ulong rb
)
210 if (ppc_load_slb_esid(env
, rb
, &rt
) < 0) {
211 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
217 target_ulong
helper_load_slb_vsid(CPUPPCState
*env
, target_ulong rb
)
221 if (ppc_load_slb_vsid(env
, rb
, &rt
) < 0) {
222 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
/*
 * 64-bit hash table MMU handling
 */
232 static int ppc_hash64_pp_check(int key
, int pp
, bool nx
)
236 /* Compute access rights */
237 /* When pp is 4, 5 or 7, the result is undefined. Set it to noaccess */
244 access
|= PAGE_WRITE
;
262 access
= PAGE_READ
| PAGE_WRITE
;
273 static int ppc_hash64_check_prot(int prot
, int rwx
)
278 if (prot
& PAGE_EXEC
) {
283 } else if (rwx
== 1) {
284 if (prot
& PAGE_WRITE
) {
290 if (prot
& PAGE_READ
) {
300 static int ppc_hash64_pte_update_flags(struct mmu_ctx_hash64
*ctx
,
301 uint64_t *pte1p
, int ret
, int rw
)
305 /* Update page flags */
306 if (!(*pte1p
& HPTE64_R_R
)) {
307 /* Update accessed flag */
308 *pte1p
|= HPTE64_R_R
;
311 if (!(*pte1p
& HPTE64_R_C
)) {
312 if (rw
== 1 && ret
== 0) {
313 /* Update changed flag */
314 *pte1p
|= HPTE64_R_C
;
317 /* Force page fault for first write access */
318 ctx
->prot
&= ~PAGE_WRITE
;
325 static hwaddr
ppc_hash64_pteg_search(CPUPPCState
*env
, hwaddr pteg_off
,
326 bool secondary
, target_ulong ptem
,
327 ppc_hash_pte64_t
*pte
)
329 hwaddr pte_offset
= pteg_off
;
330 target_ulong pte0
, pte1
;
333 for (i
= 0; i
< HPTES_PER_GROUP
; i
++) {
334 pte0
= ppc_hash64_load_hpte0(env
, pte_offset
);
335 pte1
= ppc_hash64_load_hpte1(env
, pte_offset
);
337 if ((pte0
& HPTE64_V_VALID
)
338 && (secondary
== !!(pte0
& HPTE64_V_SECONDARY
))
339 && HPTE64_V_COMPARE(pte0
, ptem
)) {
345 pte_offset
+= HASH_PTE_SIZE_64
;
351 static hwaddr
ppc_hash64_htab_lookup(CPUPPCState
*env
,
352 ppc_slb_t
*slb
, target_ulong eaddr
,
353 ppc_hash_pte64_t
*pte
)
355 hwaddr pteg_off
, pte_offset
;
357 uint64_t vsid
, epnshift
, epnmask
, epn
, ptem
;
359 /* Page size according to the SLB, which we use to generate the
360 * EPN for hash table lookup.. When we implement more recent MMU
361 * extensions this might be different from the actual page size
362 * encoded in the PTE */
363 epnshift
= (slb
->vsid
& SLB_VSID_L
)
364 ? TARGET_PAGE_BITS_16M
: TARGET_PAGE_BITS
;
365 epnmask
= ~((1ULL << epnshift
) - 1);
367 if (slb
->vsid
& SLB_VSID_B
) {
369 vsid
= (slb
->vsid
& SLB_VSID_VSID
) >> SLB_VSID_SHIFT_1T
;
370 epn
= (eaddr
& ~SEGMENT_MASK_1T
) & epnmask
;
371 hash
= vsid
^ (vsid
<< 25) ^ (epn
>> epnshift
);
374 vsid
= (slb
->vsid
& SLB_VSID_VSID
) >> SLB_VSID_SHIFT
;
375 epn
= (eaddr
& ~SEGMENT_MASK_256M
) & epnmask
;
376 hash
= vsid
^ (epn
>> epnshift
);
378 ptem
= (slb
->vsid
& SLB_VSID_PTEM
) | ((epn
>> 16) & HPTE64_V_AVPN
);
380 /* Page address translation */
381 LOG_MMU("htab_base " TARGET_FMT_plx
" htab_mask " TARGET_FMT_plx
382 " hash " TARGET_FMT_plx
"\n",
383 env
->htab_base
, env
->htab_mask
, hash
);
385 /* Primary PTEG lookup */
386 LOG_MMU("0 htab=" TARGET_FMT_plx
"/" TARGET_FMT_plx
387 " vsid=" TARGET_FMT_lx
" ptem=" TARGET_FMT_lx
388 " hash=" TARGET_FMT_plx
"\n",
389 env
->htab_base
, env
->htab_mask
, vsid
, ptem
, hash
);
390 pteg_off
= (hash
* HASH_PTEG_SIZE_64
) & env
->htab_mask
;
391 pte_offset
= ppc_hash64_pteg_search(env
, pteg_off
, 0, ptem
, pte
);
393 if (pte_offset
== -1) {
394 /* Secondary PTEG lookup */
395 LOG_MMU("1 htab=" TARGET_FMT_plx
"/" TARGET_FMT_plx
396 " vsid=" TARGET_FMT_lx
" api=" TARGET_FMT_lx
397 " hash=" TARGET_FMT_plx
"\n", env
->htab_base
,
398 env
->htab_mask
, vsid
, ptem
, ~hash
);
400 pteg_off
= (~hash
* HASH_PTEG_SIZE_64
) & env
->htab_mask
;
401 pte_offset
= ppc_hash64_pteg_search(env
, pteg_off
, 1, ptem
, pte
);
407 static int ppc_hash64_translate(CPUPPCState
*env
, struct mmu_ctx_hash64
*ctx
,
408 target_ulong eaddr
, int rwx
)
413 ppc_hash_pte64_t pte
;
414 int target_page_bits
;
416 assert((rwx
== 0) || (rwx
== 1) || (rwx
== 2));
418 /* 1. Handle real mode accesses */
419 if (((rwx
== 2) && (msr_ir
== 0)) || ((rwx
!= 2) && (msr_dr
== 0))) {
420 /* Translation is off */
421 /* In real mode the top 4 effective address bits are ignored */
422 ctx
->raddr
= eaddr
& 0x0FFFFFFFFFFFFFFFULL
;
423 ctx
->prot
= PAGE_READ
| PAGE_EXEC
| PAGE_WRITE
;
427 /* 2. Translation is on, so look up the SLB */
428 slb
= slb_lookup(env
, eaddr
);
434 /* 3. Check for segment level no-execute violation */
435 if ((rwx
== 2) && (slb
->vsid
& SLB_VSID_N
)) {
439 /* 4. Locate the PTE in the hash table */
440 pte_offset
= ppc_hash64_htab_lookup(env
, slb
, eaddr
, &pte
);
441 if (pte_offset
== -1) {
444 LOG_MMU("found PTE at offset %08" HWADDR_PRIx
"\n", pte_offset
);
446 /* 5. Check access permissions */
447 ctx
->key
= !!(msr_pr
? (slb
->vsid
& SLB_VSID_KP
)
448 : (slb
->vsid
& SLB_VSID_KS
));
454 pp
= (pte
.pte1
& HPTE64_R_PP
) | ((pte
.pte1
& HPTE64_R_PP0
) >> 61);
455 /* No execute if either noexec or guarded bits set */
456 nx
= (pte
.pte1
& HPTE64_R_N
) || (pte
.pte1
& HPTE64_R_G
);
457 /* Compute access rights */
458 access
= ppc_hash64_pp_check(ctx
->key
, pp
, nx
);
459 /* Keep the matching PTE informations */
460 ctx
->raddr
= pte
.pte1
;
462 ret
= ppc_hash64_check_prot(ctx
->prot
, rwx
);
465 /* Access right violation */
466 LOG_MMU("PTE access rejected\n");
470 LOG_MMU("PTE access granted !\n");
472 /* 6. Update PTE referenced and changed bits if necessary */
474 if (ppc_hash64_pte_update_flags(ctx
, &pte
.pte1
, ret
, rwx
) == 1) {
475 ppc_hash64_store_hpte1(env
, pte_offset
, pte
.pte1
);
478 /* We have a TLB that saves 4K pages, so let's
479 * split a huge page to 4k chunks */
480 target_page_bits
= (slb
->vsid
& SLB_VSID_L
)
481 ? TARGET_PAGE_BITS_16M
: TARGET_PAGE_BITS
;
482 if (target_page_bits
!= TARGET_PAGE_BITS
) {
483 ctx
->raddr
|= (eaddr
& ((1 << target_page_bits
) - 1))
489 hwaddr
ppc_hash64_get_phys_page_debug(CPUPPCState
*env
, target_ulong addr
)
491 struct mmu_ctx_hash64 ctx
;
493 if (unlikely(ppc_hash64_translate(env
, &ctx
, addr
, 0) != 0)) {
497 return ctx
.raddr
& TARGET_PAGE_MASK
;
500 int ppc_hash64_handle_mmu_fault(CPUPPCState
*env
, target_ulong address
, int rwx
,
503 struct mmu_ctx_hash64 ctx
;
506 ret
= ppc_hash64_translate(env
, &ctx
, address
, rwx
);
508 tlb_set_page(env
, address
& TARGET_PAGE_MASK
,
509 ctx
.raddr
& TARGET_PAGE_MASK
, ctx
.prot
,
510 mmu_idx
, TARGET_PAGE_SIZE
);
512 } else if (ret
< 0) {
517 env
->exception_index
= POWERPC_EXCP_ISI
;
518 env
->error_code
= 0x40000000;
521 /* Access rights violation */
522 env
->exception_index
= POWERPC_EXCP_ISI
;
523 env
->error_code
= 0x08000000;
526 /* No execute protection violation */
527 env
->exception_index
= POWERPC_EXCP_ISI
;
528 env
->error_code
= 0x10000000;
531 /* No match in segment table */
532 env
->exception_index
= POWERPC_EXCP_ISEG
;
539 /* No matches in page tables or TLB */
540 env
->exception_index
= POWERPC_EXCP_DSI
;
542 env
->spr
[SPR_DAR
] = address
;
544 env
->spr
[SPR_DSISR
] = 0x42000000;
546 env
->spr
[SPR_DSISR
] = 0x40000000;
550 /* Access rights violation */
551 env
->exception_index
= POWERPC_EXCP_DSI
;
553 env
->spr
[SPR_DAR
] = address
;
555 env
->spr
[SPR_DSISR
] = 0x0A000000;
557 env
->spr
[SPR_DSISR
] = 0x08000000;
561 /* No match in segment table */
562 env
->exception_index
= POWERPC_EXCP_DSEG
;
564 env
->spr
[SPR_DAR
] = address
;
569 printf("%s: set exception to %d %02x\n", __func__
,
570 env
->exception
, env
->error_code
);