/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
23 #include "sysemu/kvm.h"
25 #include "mmu-hash64.h"
26 #include "mmu-hash32.h"
27 #include "exec/exec-all.h"
29 #include "helper_regs.h"
30 #include "qemu/error-report.h"
31 #include "qemu/main-loop.h"
32 #include "qemu/qemu-print.h"
34 #include "mmu-book3s-v3.h"
35 #include "mmu-radix64.h"
37 /* #define DUMP_PAGE_TABLES */
39 void ppc_store_sdr1(CPUPPCState
*env
, target_ulong value
)
41 PowerPCCPU
*cpu
= env_archcpu(env
);
42 qemu_log_mask(CPU_LOG_MMU
, "%s: " TARGET_FMT_lx
"\n", __func__
, value
);
43 assert(!cpu
->env
.has_hv_mode
|| !cpu
->vhyp
);
44 #if defined(TARGET_PPC64)
45 if (mmu_is_64bit(env
->mmu_model
)) {
46 target_ulong sdr_mask
= SDR_64_HTABORG
| SDR_64_HTABSIZE
;
47 target_ulong htabsize
= value
& SDR_64_HTABSIZE
;
49 if (value
& ~sdr_mask
) {
50 qemu_log_mask(LOG_GUEST_ERROR
, "Invalid bits 0x"TARGET_FMT_lx
51 " set in SDR1", value
& ~sdr_mask
);
55 qemu_log_mask(LOG_GUEST_ERROR
, "Invalid HTABSIZE 0x" TARGET_FMT_lx
56 " stored in SDR1", htabsize
);
60 #endif /* defined(TARGET_PPC64) */
61 /* FIXME: Should check for valid HTABMASK values in 32-bit case */
62 env
->spr
[SPR_SDR1
] = value
;
65 /*****************************************************************************/
66 /* PowerPC MMU emulation */
68 static int pp_check(int key
, int pp
, int nx
)
72 /* Compute access rights */
95 access
= PAGE_READ
| PAGE_WRITE
;
106 static int check_prot(int prot
, MMUAccessType access_type
)
108 return prot
& prot_for_access_type(access_type
) ? 0 : -2;
111 int ppc6xx_tlb_getnum(CPUPPCState
*env
, target_ulong eaddr
,
112 int way
, int is_code
)
116 /* Select TLB num in a way from address */
117 nr
= (eaddr
>> TARGET_PAGE_BITS
) & (env
->tlb_per_way
- 1);
119 nr
+= env
->tlb_per_way
* way
;
120 /* 6xx have separate TLBs for instructions and data */
121 if (is_code
&& env
->id_tlbs
== 1) {
128 static int ppc6xx_tlb_pte_check(mmu_ctx_t
*ctx
, target_ulong pte0
,
129 target_ulong pte1
, int h
,
130 MMUAccessType access_type
)
132 target_ulong ptem
, mmask
;
133 int access
, ret
, pteh
, ptev
, pp
;
136 /* Check validity and table match */
137 ptev
= pte_is_valid(pte0
);
138 pteh
= (pte0
>> 6) & 1;
139 if (ptev
&& h
== pteh
) {
140 /* Check vsid & api */
141 ptem
= pte0
& PTE_PTEM_MASK
;
142 mmask
= PTE_CHECK_MASK
;
143 pp
= pte1
& 0x00000003;
144 if (ptem
== ctx
->ptem
) {
145 if (ctx
->raddr
!= (hwaddr
)-1ULL) {
146 /* all matches should have equal RPN, WIMG & PP */
147 if ((ctx
->raddr
& mmask
) != (pte1
& mmask
)) {
148 qemu_log_mask(CPU_LOG_MMU
, "Bad RPN/WIMG/PP\n");
152 /* Compute access rights */
153 access
= pp_check(ctx
->key
, pp
, ctx
->nx
);
154 /* Keep the matching PTE information */
157 ret
= check_prot(ctx
->prot
, access_type
);
160 qemu_log_mask(CPU_LOG_MMU
, "PTE access granted !\n");
162 /* Access right violation */
163 qemu_log_mask(CPU_LOG_MMU
, "PTE access rejected\n");
171 static int pte_update_flags(mmu_ctx_t
*ctx
, target_ulong
*pte1p
,
172 int ret
, MMUAccessType access_type
)
176 /* Update page flags */
177 if (!(*pte1p
& 0x00000100)) {
178 /* Update accessed flag */
179 *pte1p
|= 0x00000100;
182 if (!(*pte1p
& 0x00000080)) {
183 if (access_type
== MMU_DATA_STORE
&& ret
== 0) {
184 /* Update changed flag */
185 *pte1p
|= 0x00000080;
188 /* Force page fault for first write access */
189 ctx
->prot
&= ~PAGE_WRITE
;
196 /* Software driven TLB helpers */
198 static int ppc6xx_tlb_check(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
199 target_ulong eaddr
, MMUAccessType access_type
)
206 ret
= -1; /* No TLB found */
207 for (way
= 0; way
< env
->nb_ways
; way
++) {
208 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
, access_type
== MMU_INST_FETCH
);
209 tlb
= &env
->tlb
.tlb6
[nr
];
210 /* This test "emulates" the PTE index match for hardware TLBs */
211 if ((eaddr
& TARGET_PAGE_MASK
) != tlb
->EPN
) {
212 qemu_log_mask(CPU_LOG_MMU
, "TLB %d/%d %s [" TARGET_FMT_lx
213 " " TARGET_FMT_lx
"] <> " TARGET_FMT_lx
"\n",
215 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
216 tlb
->EPN
, tlb
->EPN
+ TARGET_PAGE_SIZE
, eaddr
);
219 qemu_log_mask(CPU_LOG_MMU
, "TLB %d/%d %s " TARGET_FMT_lx
" <> "
220 TARGET_FMT_lx
" " TARGET_FMT_lx
" %c %c\n",
222 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
223 tlb
->EPN
, eaddr
, tlb
->pte1
,
224 access_type
== MMU_DATA_STORE
? 'S' : 'L',
225 access_type
== MMU_INST_FETCH
? 'I' : 'D');
226 switch (ppc6xx_tlb_pte_check(ctx
, tlb
->pte0
, tlb
->pte1
,
229 /* TLB inconsistency */
232 /* Access violation */
243 * XXX: we should go on looping to check all TLBs
244 * consistency but we can speed-up the whole thing as
245 * the result would be undefined if TLBs are not
255 qemu_log_mask(CPU_LOG_MMU
, "found TLB at addr " HWADDR_FMT_plx
256 " prot=%01x ret=%d\n",
257 ctx
->raddr
& TARGET_PAGE_MASK
, ctx
->prot
, ret
);
258 /* Update page flags */
259 pte_update_flags(ctx
, &env
->tlb
.tlb6
[best
].pte1
, ret
, access_type
);
265 /* Perform BAT hit & translation */
266 static inline void bat_size_prot(CPUPPCState
*env
, target_ulong
*blp
,
267 int *validp
, int *protp
, target_ulong
*BATu
,
273 bl
= (*BATu
& 0x00001FFC) << 15;
276 if ((!FIELD_EX64(env
->msr
, MSR
, PR
) && (*BATu
& 0x00000002)) ||
277 (FIELD_EX64(env
->msr
, MSR
, PR
) && (*BATu
& 0x00000001))) {
279 pp
= *BATl
& 0x00000003;
281 prot
= PAGE_READ
| PAGE_EXEC
;
292 static int get_bat_6xx_tlb(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
293 target_ulong
virtual, MMUAccessType access_type
)
295 target_ulong
*BATlt
, *BATut
, *BATu
, *BATl
;
296 target_ulong BEPIl
, BEPIu
, bl
;
299 bool ifetch
= access_type
== MMU_INST_FETCH
;
301 qemu_log_mask(CPU_LOG_MMU
, "%s: %cBAT v " TARGET_FMT_lx
"\n", __func__
,
302 ifetch
? 'I' : 'D', virtual);
304 BATlt
= env
->IBAT
[1];
305 BATut
= env
->IBAT
[0];
307 BATlt
= env
->DBAT
[1];
308 BATut
= env
->DBAT
[0];
310 for (i
= 0; i
< env
->nb_BATs
; i
++) {
313 BEPIu
= *BATu
& 0xF0000000;
314 BEPIl
= *BATu
& 0x0FFE0000;
315 bat_size_prot(env
, &bl
, &valid
, &prot
, BATu
, BATl
);
316 qemu_log_mask(CPU_LOG_MMU
, "%s: %cBAT%d v " TARGET_FMT_lx
" BATu "
317 TARGET_FMT_lx
" BATl " TARGET_FMT_lx
"\n", __func__
,
318 ifetch
? 'I' : 'D', i
, virtual, *BATu
, *BATl
);
319 if ((virtual & 0xF0000000) == BEPIu
&&
320 ((virtual & 0x0FFE0000) & ~bl
) == BEPIl
) {
323 /* Get physical address */
324 ctx
->raddr
= (*BATl
& 0xF0000000) |
325 ((virtual & 0x0FFE0000 & bl
) | (*BATl
& 0x0FFE0000)) |
326 (virtual & 0x0001F000);
327 /* Compute access rights */
329 ret
= check_prot(ctx
->prot
, access_type
);
331 qemu_log_mask(CPU_LOG_MMU
, "BAT %d match: r " HWADDR_FMT_plx
332 " prot=%c%c\n", i
, ctx
->raddr
,
333 ctx
->prot
& PAGE_READ
? 'R' : '-',
334 ctx
->prot
& PAGE_WRITE
? 'W' : '-');
341 if (qemu_log_enabled()) {
342 qemu_log_mask(CPU_LOG_MMU
, "no BAT match for "
343 TARGET_FMT_lx
":\n", virtual);
344 for (i
= 0; i
< 4; i
++) {
347 BEPIu
= *BATu
& 0xF0000000;
348 BEPIl
= *BATu
& 0x0FFE0000;
349 bl
= (*BATu
& 0x00001FFC) << 15;
350 qemu_log_mask(CPU_LOG_MMU
, "%s: %cBAT%d v "
351 TARGET_FMT_lx
" BATu " TARGET_FMT_lx
352 " BATl " TARGET_FMT_lx
"\n\t" TARGET_FMT_lx
" "
353 TARGET_FMT_lx
" " TARGET_FMT_lx
"\n",
354 __func__
, ifetch
? 'I' : 'D', i
, virtual,
355 *BATu
, *BATl
, BEPIu
, BEPIl
, bl
);
363 /* Perform segment based translation */
364 static int get_segment_6xx_tlb(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
365 target_ulong eaddr
, MMUAccessType access_type
,
368 PowerPCCPU
*cpu
= env_archcpu(env
);
371 int ds
, target_page_bits
;
374 target_ulong sr
, pgidx
;
376 pr
= FIELD_EX64(env
->msr
, MSR
, PR
);
379 sr
= env
->sr
[eaddr
>> 28];
380 ctx
->key
= (((sr
& 0x20000000) && pr
) ||
381 ((sr
& 0x40000000) && !pr
)) ? 1 : 0;
382 ds
= sr
& 0x80000000 ? 1 : 0;
383 ctx
->nx
= sr
& 0x10000000 ? 1 : 0;
384 vsid
= sr
& 0x00FFFFFF;
385 target_page_bits
= TARGET_PAGE_BITS
;
386 qemu_log_mask(CPU_LOG_MMU
,
387 "Check segment v=" TARGET_FMT_lx
" %d " TARGET_FMT_lx
388 " nip=" TARGET_FMT_lx
" lr=" TARGET_FMT_lx
389 " ir=%d dr=%d pr=%d %d t=%d\n",
390 eaddr
, (int)(eaddr
>> 28), sr
, env
->nip
, env
->lr
,
391 (int)FIELD_EX64(env
->msr
, MSR
, IR
),
392 (int)FIELD_EX64(env
->msr
, MSR
, DR
), pr
? 1 : 0,
393 access_type
== MMU_DATA_STORE
, type
);
394 pgidx
= (eaddr
& ~SEGMENT_MASK_256M
) >> target_page_bits
;
396 ctx
->ptem
= (vsid
<< 7) | (pgidx
>> 10);
398 qemu_log_mask(CPU_LOG_MMU
,
399 "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx
"\n",
400 ctx
->key
, ds
, ctx
->nx
, vsid
);
403 /* Check if instruction fetch is allowed, if needed */
404 if (type
!= ACCESS_CODE
|| ctx
->nx
== 0) {
405 /* Page address translation */
406 qemu_log_mask(CPU_LOG_MMU
, "htab_base " HWADDR_FMT_plx
407 " htab_mask " HWADDR_FMT_plx
408 " hash " HWADDR_FMT_plx
"\n",
409 ppc_hash32_hpt_base(cpu
), ppc_hash32_hpt_mask(cpu
), hash
);
411 ctx
->hash
[1] = ~hash
;
413 /* Initialize real address with an invalid value */
414 ctx
->raddr
= (hwaddr
)-1ULL;
415 /* Software TLB search */
416 ret
= ppc6xx_tlb_check(env
, ctx
, eaddr
, access_type
);
417 #if defined(DUMP_PAGE_TABLES)
418 if (qemu_loglevel_mask(CPU_LOG_MMU
)) {
419 CPUState
*cs
= env_cpu(env
);
421 uint32_t a0
, a1
, a2
, a3
;
423 qemu_log("Page table: " HWADDR_FMT_plx
" len " HWADDR_FMT_plx
424 "\n", ppc_hash32_hpt_base(cpu
),
425 ppc_hash32_hpt_mask(cpu
) + 0x80);
426 for (curaddr
= ppc_hash32_hpt_base(cpu
);
427 curaddr
< (ppc_hash32_hpt_base(cpu
)
428 + ppc_hash32_hpt_mask(cpu
) + 0x80);
430 a0
= ldl_phys(cs
->as
, curaddr
);
431 a1
= ldl_phys(cs
->as
, curaddr
+ 4);
432 a2
= ldl_phys(cs
->as
, curaddr
+ 8);
433 a3
= ldl_phys(cs
->as
, curaddr
+ 12);
434 if (a0
!= 0 || a1
!= 0 || a2
!= 0 || a3
!= 0) {
435 qemu_log(HWADDR_FMT_plx
": %08x %08x %08x %08x\n",
436 curaddr
, a0
, a1
, a2
, a3
);
442 qemu_log_mask(CPU_LOG_MMU
, "No access allowed\n");
446 qemu_log_mask(CPU_LOG_MMU
, "direct store...\n");
447 /* Direct-store segment : absolutely *BUGGY* for now */
451 /* Integer load/store : only access allowed */
454 /* No code fetch is allowed in direct-store areas */
457 /* Floating point load/store */
460 /* lwarx, ldarx or srwcx. */
464 * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
466 * Should make the instruction do no-op. As it already do
467 * no-op, it's quite easy :-)
475 qemu_log_mask(CPU_LOG_MMU
, "ERROR: instruction should not need "
476 "address translation\n");
479 if ((access_type
== MMU_DATA_STORE
|| ctx
->key
!= 1) &&
480 (access_type
== MMU_DATA_LOAD
|| ctx
->key
!= 0)) {
491 /* Generic TLB check function for embedded PowerPC implementations */
492 static int ppcemb_tlb_check(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
494 target_ulong address
, uint32_t pid
, int i
)
498 /* Check valid flag */
499 if (!(tlb
->prot
& PAGE_VALID
)) {
502 mask
= ~(tlb
->size
- 1);
503 qemu_log_mask(CPU_LOG_MMU
, "%s: TLB %d address " TARGET_FMT_lx
504 " PID %u <=> " TARGET_FMT_lx
" " TARGET_FMT_lx
" %u %x\n",
505 __func__
, i
, address
, pid
, tlb
->EPN
,
506 mask
, (uint32_t)tlb
->PID
, tlb
->prot
);
508 if (tlb
->PID
!= 0 && tlb
->PID
!= pid
) {
511 /* Check effective address */
512 if ((address
& mask
) != tlb
->EPN
) {
515 *raddrp
= (tlb
->RPN
& mask
) | (address
& ~mask
);
519 /* Generic TLB search function for PowerPC embedded implementations */
520 int ppcemb_tlb_search(CPUPPCState
*env
, target_ulong address
, uint32_t pid
)
526 /* Default return value is no match */
528 for (i
= 0; i
< env
->nb_tlb
; i
++) {
529 tlb
= &env
->tlb
.tlbe
[i
];
530 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
, pid
, i
) == 0) {
539 static int mmu40x_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
540 target_ulong address
,
541 MMUAccessType access_type
)
545 int i
, ret
, zsel
, zpr
, pr
;
548 raddr
= (hwaddr
)-1ULL;
549 pr
= FIELD_EX64(env
->msr
, MSR
, PR
);
550 for (i
= 0; i
< env
->nb_tlb
; i
++) {
551 tlb
= &env
->tlb
.tlbe
[i
];
552 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
,
553 env
->spr
[SPR_40x_PID
], i
) < 0) {
556 zsel
= (tlb
->attr
>> 4) & 0xF;
557 zpr
= (env
->spr
[SPR_40x_ZPR
] >> (30 - (2 * zsel
))) & 0x3;
558 qemu_log_mask(CPU_LOG_MMU
,
559 "%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
560 __func__
, i
, zsel
, zpr
, access_type
, tlb
->attr
);
561 /* Check execute enable bit */
569 /* All accesses granted */
570 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
575 /* Raise Zone protection fault. */
576 env
->spr
[SPR_40x_ESR
] = 1 << 22;
584 /* Check from TLB entry */
585 ctx
->prot
= tlb
->prot
;
586 ret
= check_prot(ctx
->prot
, access_type
);
588 env
->spr
[SPR_40x_ESR
] = 0;
594 qemu_log_mask(CPU_LOG_MMU
, "%s: access granted " TARGET_FMT_lx
595 " => " HWADDR_FMT_plx
596 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
601 qemu_log_mask(CPU_LOG_MMU
, "%s: access refused " TARGET_FMT_lx
602 " => " HWADDR_FMT_plx
603 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
608 static int mmubooke_check_tlb(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
609 hwaddr
*raddr
, int *prot
, target_ulong address
,
610 MMUAccessType access_type
, int i
)
614 if (ppcemb_tlb_check(env
, tlb
, raddr
, address
,
615 env
->spr
[SPR_BOOKE_PID
], i
) >= 0) {
617 /* Extend the physical address to 36 bits */
618 *raddr
|= (uint64_t)(tlb
->RPN
& 0xF) << 32;
623 if (env
->spr
[SPR_BOOKE_PID1
] &&
624 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
625 env
->spr
[SPR_BOOKE_PID1
], i
) >= 0) {
629 if (env
->spr
[SPR_BOOKE_PID2
] &&
630 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
631 env
->spr
[SPR_BOOKE_PID2
], i
) >= 0) {
635 qemu_log_mask(CPU_LOG_MMU
, "%s: TLB entry not found\n", __func__
);
640 if (FIELD_EX64(env
->msr
, MSR
, PR
)) {
641 prot2
= tlb
->prot
& 0xF;
643 prot2
= (tlb
->prot
>> 4) & 0xF;
646 /* Check the address space */
647 if ((access_type
== MMU_INST_FETCH
?
648 FIELD_EX64(env
->msr
, MSR
, IR
) :
649 FIELD_EX64(env
->msr
, MSR
, DR
)) != (tlb
->attr
& 1)) {
650 qemu_log_mask(CPU_LOG_MMU
, "%s: AS doesn't match\n", __func__
);
655 if (prot2
& prot_for_access_type(access_type
)) {
656 qemu_log_mask(CPU_LOG_MMU
, "%s: good TLB!\n", __func__
);
660 qemu_log_mask(CPU_LOG_MMU
, "%s: no prot match: %x\n", __func__
, prot2
);
661 return access_type
== MMU_INST_FETCH
? -3 : -2;
664 static int mmubooke_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
665 target_ulong address
,
666 MMUAccessType access_type
)
673 raddr
= (hwaddr
)-1ULL;
674 for (i
= 0; i
< env
->nb_tlb
; i
++) {
675 tlb
= &env
->tlb
.tlbe
[i
];
676 ret
= mmubooke_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
,
685 qemu_log_mask(CPU_LOG_MMU
, "%s: access granted " TARGET_FMT_lx
686 " => " HWADDR_FMT_plx
" %d %d\n", __func__
,
687 address
, ctx
->raddr
, ctx
->prot
, ret
);
689 qemu_log_mask(CPU_LOG_MMU
, "%s: access refused " TARGET_FMT_lx
690 " => " HWADDR_FMT_plx
" %d %d\n", __func__
,
691 address
, raddr
, ctx
->prot
, ret
);
697 hwaddr
booke206_tlb_to_page_size(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
)
701 tlbm_size
= (tlb
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
703 return 1024ULL << tlbm_size
;
706 /* TLB check function for MAS based SoftTLBs */
707 int ppcmas_tlb_check(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
, hwaddr
*raddrp
,
708 target_ulong address
, uint32_t pid
)
713 if (!FIELD_EX64(env
->msr
, MSR
, CM
)) {
714 /* In 32bit mode we can only address 32bit EAs */
715 address
= (uint32_t)address
;
718 /* Check valid flag */
719 if (!(tlb
->mas1
& MAS1_VALID
)) {
723 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
724 qemu_log_mask(CPU_LOG_MMU
, "%s: TLB ADDR=0x" TARGET_FMT_lx
725 " PID=0x%x MAS1=0x%x MAS2=0x%" PRIx64
" mask=0x%"
726 HWADDR_PRIx
" MAS7_3=0x%" PRIx64
" MAS8=0x%" PRIx32
"\n",
727 __func__
, address
, pid
, tlb
->mas1
, tlb
->mas2
, mask
,
728 tlb
->mas7_3
, tlb
->mas8
);
731 tlb_pid
= (tlb
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
;
732 if (tlb_pid
!= 0 && tlb_pid
!= pid
) {
736 /* Check effective address */
737 if ((address
& mask
) != (tlb
->mas2
& MAS2_EPN_MASK
)) {
742 *raddrp
= (tlb
->mas7_3
& mask
) | (address
& ~mask
);
748 static bool is_epid_mmu(int mmu_idx
)
750 return mmu_idx
== PPC_TLB_EPID_STORE
|| mmu_idx
== PPC_TLB_EPID_LOAD
;
753 static uint32_t mmubooke206_esr(int mmu_idx
, MMUAccessType access_type
)
756 if (access_type
== MMU_DATA_STORE
) {
759 if (is_epid_mmu(mmu_idx
)) {
766 * Get EPID register given the mmu_idx. If this is regular load,
767 * construct the EPID access bits from current processor state
769 * Get the effective AS and PR bits and the PID. The PID is returned
770 * only if EPID load is requested, otherwise the caller must detect
771 * the correct EPID. Return true if valid EPID is returned.
773 static bool mmubooke206_get_as(CPUPPCState
*env
,
774 int mmu_idx
, uint32_t *epid_out
,
775 bool *as_out
, bool *pr_out
)
777 if (is_epid_mmu(mmu_idx
)) {
779 if (mmu_idx
== PPC_TLB_EPID_STORE
) {
780 epidr
= env
->spr
[SPR_BOOKE_EPSC
];
782 epidr
= env
->spr
[SPR_BOOKE_EPLC
];
784 *epid_out
= (epidr
& EPID_EPID
) >> EPID_EPID_SHIFT
;
785 *as_out
= !!(epidr
& EPID_EAS
);
786 *pr_out
= !!(epidr
& EPID_EPR
);
789 *as_out
= FIELD_EX64(env
->msr
, MSR
, DS
);
790 *pr_out
= FIELD_EX64(env
->msr
, MSR
, PR
);
795 /* Check if the tlb found by hashing really matches */
796 static int mmubooke206_check_tlb(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
797 hwaddr
*raddr
, int *prot
,
798 target_ulong address
,
799 MMUAccessType access_type
, int mmu_idx
)
804 bool use_epid
= mmubooke206_get_as(env
, mmu_idx
, &epid
, &as
, &pr
);
807 if (ppcmas_tlb_check(env
, tlb
, raddr
, address
,
808 env
->spr
[SPR_BOOKE_PID
]) >= 0) {
812 if (env
->spr
[SPR_BOOKE_PID1
] &&
813 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
814 env
->spr
[SPR_BOOKE_PID1
]) >= 0) {
818 if (env
->spr
[SPR_BOOKE_PID2
] &&
819 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
820 env
->spr
[SPR_BOOKE_PID2
]) >= 0) {
824 if (ppcmas_tlb_check(env
, tlb
, raddr
, address
, epid
) >= 0) {
829 qemu_log_mask(CPU_LOG_MMU
, "%s: No TLB entry found for effective address "
830 "0x" TARGET_FMT_lx
"\n", __func__
, address
);
836 if (tlb
->mas7_3
& MAS3_UR
) {
839 if (tlb
->mas7_3
& MAS3_UW
) {
842 if (tlb
->mas7_3
& MAS3_UX
) {
846 if (tlb
->mas7_3
& MAS3_SR
) {
849 if (tlb
->mas7_3
& MAS3_SW
) {
852 if (tlb
->mas7_3
& MAS3_SX
) {
857 /* Check the address space and permissions */
858 if (access_type
== MMU_INST_FETCH
) {
859 /* There is no way to fetch code using epid load */
861 as
= FIELD_EX64(env
->msr
, MSR
, IR
);
864 if (as
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
865 qemu_log_mask(CPU_LOG_MMU
, "%s: AS doesn't match\n", __func__
);
870 if (prot2
& prot_for_access_type(access_type
)) {
871 qemu_log_mask(CPU_LOG_MMU
, "%s: good TLB!\n", __func__
);
875 qemu_log_mask(CPU_LOG_MMU
, "%s: no prot match: %x\n", __func__
, prot2
);
876 return access_type
== MMU_INST_FETCH
? -3 : -2;
879 static int mmubooke206_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
880 target_ulong address
,
881 MMUAccessType access_type
,
889 raddr
= (hwaddr
)-1ULL;
891 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
892 int ways
= booke206_tlb_ways(env
, i
);
894 for (j
= 0; j
< ways
; j
++) {
895 tlb
= booke206_get_tlbm(env
, i
, address
, j
);
899 ret
= mmubooke206_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
,
900 access_type
, mmu_idx
);
911 qemu_log_mask(CPU_LOG_MMU
, "%s: access granted " TARGET_FMT_lx
912 " => " HWADDR_FMT_plx
" %d %d\n", __func__
, address
,
913 ctx
->raddr
, ctx
->prot
, ret
);
915 qemu_log_mask(CPU_LOG_MMU
, "%s: access refused " TARGET_FMT_lx
916 " => " HWADDR_FMT_plx
" %d %d\n", __func__
, address
,
917 raddr
, ctx
->prot
, ret
);
923 static const char *book3e_tsize_to_str
[32] = {
924 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
925 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
926 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
930 static void mmubooke_dump_mmu(CPUPPCState
*env
)
935 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
936 qemu_printf("Cannot access KVM TLB\n");
940 qemu_printf("\nTLB:\n");
941 qemu_printf("Effective Physical Size PID Prot "
944 entry
= &env
->tlb
.tlbe
[0];
945 for (i
= 0; i
< env
->nb_tlb
; i
++, entry
++) {
948 uint64_t size
= (uint64_t)entry
->size
;
951 /* Check valid flag */
952 if (!(entry
->prot
& PAGE_VALID
)) {
956 mask
= ~(entry
->size
- 1);
957 ea
= entry
->EPN
& mask
;
958 pa
= entry
->RPN
& mask
;
959 /* Extend the physical address to 36 bits */
960 pa
|= (hwaddr
)(entry
->RPN
& 0xF) << 32;
961 if (size
>= 1 * MiB
) {
962 snprintf(size_buf
, sizeof(size_buf
), "%3" PRId64
"M", size
/ MiB
);
964 snprintf(size_buf
, sizeof(size_buf
), "%3" PRId64
"k", size
/ KiB
);
966 qemu_printf("0x%016" PRIx64
" 0x%016" PRIx64
" %s %-5u %08x %08x\n",
967 (uint64_t)ea
, (uint64_t)pa
, size_buf
, (uint32_t)entry
->PID
,
968 entry
->prot
, entry
->attr
);
973 static void mmubooke206_dump_one_tlb(CPUPPCState
*env
, int tlbn
, int offset
,
979 qemu_printf("\nTLB%d:\n", tlbn
);
980 qemu_printf("Effective Physical Size TID TS SRWX"
981 " URWX WIMGE U0123\n");
983 entry
= &env
->tlb
.tlbm
[offset
];
984 for (i
= 0; i
< tlbsize
; i
++, entry
++) {
988 if (!(entry
->mas1
& MAS1_VALID
)) {
992 tsize
= (entry
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
993 size
= 1024ULL << tsize
;
994 ea
= entry
->mas2
& ~(size
- 1);
995 pa
= entry
->mas7_3
& ~(size
- 1);
997 qemu_printf("0x%016" PRIx64
" 0x%016" PRIx64
" %4s %-5u %1u S%c%c%c"
998 " U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
999 (uint64_t)ea
, (uint64_t)pa
,
1000 book3e_tsize_to_str
[tsize
],
1001 (entry
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
,
1002 (entry
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
,
1003 entry
->mas7_3
& MAS3_SR
? 'R' : '-',
1004 entry
->mas7_3
& MAS3_SW
? 'W' : '-',
1005 entry
->mas7_3
& MAS3_SX
? 'X' : '-',
1006 entry
->mas7_3
& MAS3_UR
? 'R' : '-',
1007 entry
->mas7_3
& MAS3_UW
? 'W' : '-',
1008 entry
->mas7_3
& MAS3_UX
? 'X' : '-',
1009 entry
->mas2
& MAS2_W
? 'W' : '-',
1010 entry
->mas2
& MAS2_I
? 'I' : '-',
1011 entry
->mas2
& MAS2_M
? 'M' : '-',
1012 entry
->mas2
& MAS2_G
? 'G' : '-',
1013 entry
->mas2
& MAS2_E
? 'E' : '-',
1014 entry
->mas7_3
& MAS3_U0
? '0' : '-',
1015 entry
->mas7_3
& MAS3_U1
? '1' : '-',
1016 entry
->mas7_3
& MAS3_U2
? '2' : '-',
1017 entry
->mas7_3
& MAS3_U3
? '3' : '-');
1021 static void mmubooke206_dump_mmu(CPUPPCState
*env
)
1026 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
1027 qemu_printf("Cannot access KVM TLB\n");
1031 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1032 int size
= booke206_tlb_size(env
, i
);
1038 mmubooke206_dump_one_tlb(env
, i
, offset
, size
);
1043 static void mmu6xx_dump_BATs(CPUPPCState
*env
, int type
)
1045 target_ulong
*BATlt
, *BATut
, *BATu
, *BATl
;
1046 target_ulong BEPIl
, BEPIu
, bl
;
1051 BATlt
= env
->IBAT
[1];
1052 BATut
= env
->IBAT
[0];
1055 BATlt
= env
->DBAT
[1];
1056 BATut
= env
->DBAT
[0];
1060 for (i
= 0; i
< env
->nb_BATs
; i
++) {
1063 BEPIu
= *BATu
& 0xF0000000;
1064 BEPIl
= *BATu
& 0x0FFE0000;
1065 bl
= (*BATu
& 0x00001FFC) << 15;
1066 qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
1067 " BATl " TARGET_FMT_lx
"\n\t" TARGET_FMT_lx
" "
1068 TARGET_FMT_lx
" " TARGET_FMT_lx
"\n",
1069 type
== ACCESS_CODE
? "code" : "data", i
,
1070 *BATu
, *BATl
, BEPIu
, BEPIl
, bl
);
1074 static void mmu6xx_dump_mmu(CPUPPCState
*env
)
1076 PowerPCCPU
*cpu
= env_archcpu(env
);
1079 int type
, way
, entry
, i
;
1081 qemu_printf("HTAB base = 0x%"HWADDR_PRIx
"\n", ppc_hash32_hpt_base(cpu
));
1082 qemu_printf("HTAB mask = 0x%"HWADDR_PRIx
"\n", ppc_hash32_hpt_mask(cpu
));
1084 qemu_printf("\nSegment registers:\n");
1085 for (i
= 0; i
< 32; i
++) {
1087 if (sr
& 0x80000000) {
1088 qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
1089 "CNTLR_SPEC=0x%05x\n", i
,
1090 sr
& 0x80000000 ? 1 : 0, sr
& 0x40000000 ? 1 : 0,
1091 sr
& 0x20000000 ? 1 : 0, (uint32_t)((sr
>> 20) & 0x1FF),
1092 (uint32_t)(sr
& 0xFFFFF));
1094 qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i
,
1095 sr
& 0x80000000 ? 1 : 0, sr
& 0x40000000 ? 1 : 0,
1096 sr
& 0x20000000 ? 1 : 0, sr
& 0x10000000 ? 1 : 0,
1097 (uint32_t)(sr
& 0x00FFFFFF));
1101 qemu_printf("\nBATs:\n");
1102 mmu6xx_dump_BATs(env
, ACCESS_INT
);
1103 mmu6xx_dump_BATs(env
, ACCESS_CODE
);
1105 if (env
->id_tlbs
!= 1) {
1106 qemu_printf("ERROR: 6xx MMU should have separated TLB"
1107 " for code and data\n");
1110 qemu_printf("\nTLBs [EPN EPN + SIZE]\n");
1112 for (type
= 0; type
< 2; type
++) {
1113 for (way
= 0; way
< env
->nb_ways
; way
++) {
1114 for (entry
= env
->nb_tlb
* type
+ env
->tlb_per_way
* way
;
1115 entry
< (env
->nb_tlb
* type
+ env
->tlb_per_way
* (way
+ 1));
1118 tlb
= &env
->tlb
.tlb6
[entry
];
1119 qemu_printf("%s TLB %02d/%02d way:%d %s ["
1120 TARGET_FMT_lx
" " TARGET_FMT_lx
"]\n",
1121 type
? "code" : "data", entry
% env
->nb_tlb
,
1123 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
1124 tlb
->EPN
, tlb
->EPN
+ TARGET_PAGE_SIZE
);
1130 void dump_mmu(CPUPPCState
*env
)
1132 switch (env
->mmu_model
) {
1133 case POWERPC_MMU_BOOKE
:
1134 mmubooke_dump_mmu(env
);
1136 case POWERPC_MMU_BOOKE206
:
1137 mmubooke206_dump_mmu(env
);
1139 case POWERPC_MMU_SOFT_6xx
:
1140 mmu6xx_dump_mmu(env
);
1142 #if defined(TARGET_PPC64)
1143 case POWERPC_MMU_64B
:
1144 case POWERPC_MMU_2_03
:
1145 case POWERPC_MMU_2_06
:
1146 case POWERPC_MMU_2_07
:
1147 dump_slb(env_archcpu(env
));
1149 case POWERPC_MMU_3_00
:
1150 if (ppc64_v3_radix(env_archcpu(env
))) {
1151 qemu_log_mask(LOG_UNIMP
, "%s: the PPC64 MMU is unsupported\n",
1154 dump_slb(env_archcpu(env
));
1159 qemu_log_mask(LOG_UNIMP
, "%s: unimplemented\n", __func__
);
1163 static int check_physical(CPUPPCState
*env
, mmu_ctx_t
*ctx
, target_ulong eaddr
,
1164 MMUAccessType access_type
)
1167 ctx
->prot
= PAGE_READ
| PAGE_EXEC
;
1169 switch (env
->mmu_model
) {
1170 case POWERPC_MMU_SOFT_6xx
:
1171 case POWERPC_MMU_SOFT_4xx
:
1172 case POWERPC_MMU_REAL
:
1173 case POWERPC_MMU_BOOKE
:
1174 ctx
->prot
|= PAGE_WRITE
;
1178 /* Caller's checks mean we should never get here for other models */
1179 g_assert_not_reached();
1185 int get_physical_address_wtlb(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1187 MMUAccessType access_type
, int type
,
1191 bool real_mode
= (type
== ACCESS_CODE
&& !FIELD_EX64(env
->msr
, MSR
, IR
)) ||
1192 (type
!= ACCESS_CODE
&& !FIELD_EX64(env
->msr
, MSR
, DR
));
1194 switch (env
->mmu_model
) {
1195 case POWERPC_MMU_SOFT_6xx
:
1197 ret
= check_physical(env
, ctx
, eaddr
, access_type
);
1199 /* Try to find a BAT */
1200 if (env
->nb_BATs
!= 0) {
1201 ret
= get_bat_6xx_tlb(env
, ctx
, eaddr
, access_type
);
1204 /* We didn't match any BAT entry or don't have BATs */
1205 ret
= get_segment_6xx_tlb(env
, ctx
, eaddr
, access_type
, type
);
1210 case POWERPC_MMU_SOFT_4xx
:
1212 ret
= check_physical(env
, ctx
, eaddr
, access_type
);
1214 ret
= mmu40x_get_physical_address(env
, ctx
, eaddr
, access_type
);
1217 case POWERPC_MMU_BOOKE
:
1218 ret
= mmubooke_get_physical_address(env
, ctx
, eaddr
, access_type
);
1220 case POWERPC_MMU_BOOKE206
:
1221 ret
= mmubooke206_get_physical_address(env
, ctx
, eaddr
, access_type
,
1224 case POWERPC_MMU_MPC8xx
:
1226 cpu_abort(env_cpu(env
), "MPC8xx MMU model is not implemented\n");
1228 case POWERPC_MMU_REAL
:
1230 ret
= check_physical(env
, ctx
, eaddr
, access_type
);
1232 cpu_abort(env_cpu(env
),
1233 "PowerPC in real mode do not do any translation\n");
1237 cpu_abort(env_cpu(env
), "Unknown or invalid MMU model\n");
1244 static void booke206_update_mas_tlb_miss(CPUPPCState
*env
, target_ulong address
,
1245 MMUAccessType access_type
, int mmu_idx
)
1249 uint32_t missed_tid
= 0;
1250 bool use_epid
= mmubooke206_get_as(env
, mmu_idx
, &epid
, &as
, &pr
);
1252 if (access_type
== MMU_INST_FETCH
) {
1253 as
= FIELD_EX64(env
->msr
, MSR
, IR
);
1255 env
->spr
[SPR_BOOKE_MAS0
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TLBSELD_MASK
;
1256 env
->spr
[SPR_BOOKE_MAS1
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TSIZED_MASK
;
1257 env
->spr
[SPR_BOOKE_MAS2
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_WIMGED_MASK
;
1258 env
->spr
[SPR_BOOKE_MAS3
] = 0;
1259 env
->spr
[SPR_BOOKE_MAS6
] = 0;
1260 env
->spr
[SPR_BOOKE_MAS7
] = 0;
1264 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_TS
;
1265 env
->spr
[SPR_BOOKE_MAS6
] |= MAS6_SAS
;
1268 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_VALID
;
1269 env
->spr
[SPR_BOOKE_MAS2
] |= address
& MAS2_EPN_MASK
;
1272 switch (env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TIDSELD_PIDZ
) {
1273 case MAS4_TIDSELD_PID0
:
1274 missed_tid
= env
->spr
[SPR_BOOKE_PID
];
1276 case MAS4_TIDSELD_PID1
:
1277 missed_tid
= env
->spr
[SPR_BOOKE_PID1
];
1279 case MAS4_TIDSELD_PID2
:
1280 missed_tid
= env
->spr
[SPR_BOOKE_PID2
];
1283 env
->spr
[SPR_BOOKE_MAS6
] |= env
->spr
[SPR_BOOKE_PID
] << 16;
1286 env
->spr
[SPR_BOOKE_MAS6
] |= missed_tid
<< 16;
1288 env
->spr
[SPR_BOOKE_MAS1
] |= (missed_tid
<< MAS1_TID_SHIFT
);
1291 /* next victim logic */
1292 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_ESEL_SHIFT
;
1294 env
->last_way
&= booke206_tlb_ways(env
, 0) - 1;
1295 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
1298 /* Perform address translation */
1299 /* TODO: Split this by mmu_model. */
1300 static bool ppc_jumbo_xlate(PowerPCCPU
*cpu
, vaddr eaddr
,
1301 MMUAccessType access_type
,
1302 hwaddr
*raddrp
, int *psizep
, int *protp
,
1303 int mmu_idx
, bool guest_visible
)
1305 CPUState
*cs
= CPU(cpu
);
1306 CPUPPCState
*env
= &cpu
->env
;
1311 if (access_type
== MMU_INST_FETCH
) {
1314 } else if (guest_visible
) {
1316 type
= env
->access_type
;
1321 ret
= get_physical_address_wtlb(env
, &ctx
, eaddr
, access_type
,
1324 *raddrp
= ctx
.raddr
;
1326 *psizep
= TARGET_PAGE_BITS
;
1330 if (guest_visible
) {
1331 log_cpu_state_mask(CPU_LOG_MMU
, cs
, 0);
1332 if (type
== ACCESS_CODE
) {
1335 /* No matches in page tables or TLB */
1336 switch (env
->mmu_model
) {
1337 case POWERPC_MMU_SOFT_6xx
:
1338 cs
->exception_index
= POWERPC_EXCP_IFTLB
;
1339 env
->error_code
= 1 << 18;
1340 env
->spr
[SPR_IMISS
] = eaddr
;
1341 env
->spr
[SPR_ICMP
] = 0x80000000 | ctx
.ptem
;
1343 case POWERPC_MMU_SOFT_4xx
:
1344 cs
->exception_index
= POWERPC_EXCP_ITLB
;
1345 env
->error_code
= 0;
1346 env
->spr
[SPR_40x_DEAR
] = eaddr
;
1347 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1349 case POWERPC_MMU_BOOKE206
:
1350 booke206_update_mas_tlb_miss(env
, eaddr
, 2, mmu_idx
);
1352 case POWERPC_MMU_BOOKE
:
1353 cs
->exception_index
= POWERPC_EXCP_ITLB
;
1354 env
->error_code
= 0;
1355 env
->spr
[SPR_BOOKE_DEAR
] = eaddr
;
1356 env
->spr
[SPR_BOOKE_ESR
] = mmubooke206_esr(mmu_idx
, MMU_DATA_LOAD
);
1358 case POWERPC_MMU_MPC8xx
:
1359 cpu_abort(cs
, "MPC8xx MMU model is not implemented\n");
1360 case POWERPC_MMU_REAL
:
1361 cpu_abort(cs
, "PowerPC in real mode should never raise "
1362 "any MMU exceptions\n");
1364 cpu_abort(cs
, "Unknown or invalid MMU model\n");
1368 /* Access rights violation */
1369 cs
->exception_index
= POWERPC_EXCP_ISI
;
1370 if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1371 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1372 env
->error_code
= 0;
1374 env
->error_code
= 0x08000000;
1378 /* No execute protection violation */
1379 if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1380 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1381 env
->spr
[SPR_BOOKE_ESR
] = 0x00000000;
1382 env
->error_code
= 0;
1384 env
->error_code
= 0x10000000;
1386 cs
->exception_index
= POWERPC_EXCP_ISI
;
1389 /* Direct store exception */
1390 /* No code fetch is allowed in direct-store areas */
1391 cs
->exception_index
= POWERPC_EXCP_ISI
;
1392 if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1393 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1394 env
->error_code
= 0;
1396 env
->error_code
= 0x10000000;
1403 /* No matches in page tables or TLB */
1404 switch (env
->mmu_model
) {
1405 case POWERPC_MMU_SOFT_6xx
:
1406 if (access_type
== MMU_DATA_STORE
) {
1407 cs
->exception_index
= POWERPC_EXCP_DSTLB
;
1408 env
->error_code
= 1 << 16;
1410 cs
->exception_index
= POWERPC_EXCP_DLTLB
;
1411 env
->error_code
= 0;
1413 env
->spr
[SPR_DMISS
] = eaddr
;
1414 env
->spr
[SPR_DCMP
] = 0x80000000 | ctx
.ptem
;
1416 env
->error_code
|= ctx
.key
<< 19;
1417 env
->spr
[SPR_HASH1
] = ppc_hash32_hpt_base(cpu
) +
1418 get_pteg_offset32(cpu
, ctx
.hash
[0]);
1419 env
->spr
[SPR_HASH2
] = ppc_hash32_hpt_base(cpu
) +
1420 get_pteg_offset32(cpu
, ctx
.hash
[1]);
1422 case POWERPC_MMU_SOFT_4xx
:
1423 cs
->exception_index
= POWERPC_EXCP_DTLB
;
1424 env
->error_code
= 0;
1425 env
->spr
[SPR_40x_DEAR
] = eaddr
;
1426 if (access_type
== MMU_DATA_STORE
) {
1427 env
->spr
[SPR_40x_ESR
] = 0x00800000;
1429 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1432 case POWERPC_MMU_MPC8xx
:
1434 cpu_abort(cs
, "MPC8xx MMU model is not implemented\n");
1435 case POWERPC_MMU_BOOKE206
:
1436 booke206_update_mas_tlb_miss(env
, eaddr
, access_type
, mmu_idx
);
1438 case POWERPC_MMU_BOOKE
:
1439 cs
->exception_index
= POWERPC_EXCP_DTLB
;
1440 env
->error_code
= 0;
1441 env
->spr
[SPR_BOOKE_DEAR
] = eaddr
;
1442 env
->spr
[SPR_BOOKE_ESR
] = mmubooke206_esr(mmu_idx
, access_type
);
1444 case POWERPC_MMU_REAL
:
1445 cpu_abort(cs
, "PowerPC in real mode should never raise "
1446 "any MMU exceptions\n");
1448 cpu_abort(cs
, "Unknown or invalid MMU model\n");
1452 /* Access rights violation */
1453 cs
->exception_index
= POWERPC_EXCP_DSI
;
1454 env
->error_code
= 0;
1455 if (env
->mmu_model
== POWERPC_MMU_SOFT_4xx
) {
1456 env
->spr
[SPR_40x_DEAR
] = eaddr
;
1457 if (access_type
== MMU_DATA_STORE
) {
1458 env
->spr
[SPR_40x_ESR
] |= 0x00800000;
1460 } else if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1461 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1462 env
->spr
[SPR_BOOKE_DEAR
] = eaddr
;
1463 env
->spr
[SPR_BOOKE_ESR
] = mmubooke206_esr(mmu_idx
, access_type
);
1465 env
->spr
[SPR_DAR
] = eaddr
;
1466 if (access_type
== MMU_DATA_STORE
) {
1467 env
->spr
[SPR_DSISR
] = 0x0A000000;
1469 env
->spr
[SPR_DSISR
] = 0x08000000;
1474 /* Direct store exception */
1477 /* Floating point load/store */
1478 cs
->exception_index
= POWERPC_EXCP_ALIGN
;
1479 env
->error_code
= POWERPC_EXCP_ALIGN_FP
;
1480 env
->spr
[SPR_DAR
] = eaddr
;
1483 /* lwarx, ldarx or stwcx. */
1484 cs
->exception_index
= POWERPC_EXCP_DSI
;
1485 env
->error_code
= 0;
1486 env
->spr
[SPR_DAR
] = eaddr
;
1487 if (access_type
== MMU_DATA_STORE
) {
1488 env
->spr
[SPR_DSISR
] = 0x06000000;
1490 env
->spr
[SPR_DSISR
] = 0x04000000;
1494 /* eciwx or ecowx */
1495 cs
->exception_index
= POWERPC_EXCP_DSI
;
1496 env
->error_code
= 0;
1497 env
->spr
[SPR_DAR
] = eaddr
;
1498 if (access_type
== MMU_DATA_STORE
) {
1499 env
->spr
[SPR_DSISR
] = 0x06100000;
1501 env
->spr
[SPR_DSISR
] = 0x04100000;
1505 printf("DSI: invalid exception (%d)\n", ret
);
1506 cs
->exception_index
= POWERPC_EXCP_PROGRAM
;
1508 POWERPC_EXCP_INVAL
| POWERPC_EXCP_INVAL_INVAL
;
1509 env
->spr
[SPR_DAR
] = eaddr
;
1519 /*****************************************************************************/
1521 bool ppc_xlate(PowerPCCPU
*cpu
, vaddr eaddr
, MMUAccessType access_type
,
1522 hwaddr
*raddrp
, int *psizep
, int *protp
,
1523 int mmu_idx
, bool guest_visible
)
1525 switch (cpu
->env
.mmu_model
) {
1526 #if defined(TARGET_PPC64)
1527 case POWERPC_MMU_3_00
:
1528 if (ppc64_v3_radix(cpu
)) {
1529 return ppc_radix64_xlate(cpu
, eaddr
, access_type
, raddrp
,
1530 psizep
, protp
, mmu_idx
, guest_visible
);
1533 case POWERPC_MMU_64B
:
1534 case POWERPC_MMU_2_03
:
1535 case POWERPC_MMU_2_06
:
1536 case POWERPC_MMU_2_07
:
1537 return ppc_hash64_xlate(cpu
, eaddr
, access_type
,
1538 raddrp
, psizep
, protp
, mmu_idx
, guest_visible
);
1541 case POWERPC_MMU_32B
:
1542 return ppc_hash32_xlate(cpu
, eaddr
, access_type
, raddrp
,
1543 psizep
, protp
, mmu_idx
, guest_visible
);
1546 return ppc_jumbo_xlate(cpu
, eaddr
, access_type
, raddrp
,
1547 psizep
, protp
, mmu_idx
, guest_visible
);
1551 hwaddr
ppc_cpu_get_phys_page_debug(CPUState
*cs
, vaddr addr
)
1553 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
1558 * Some MMUs have separate TLBs for code and data. If we only
1559 * try an MMU_DATA_LOAD, we may not be able to read instructions
1560 * mapped by code TLBs, so we also try a MMU_INST_FETCH.
1562 if (ppc_xlate(cpu
, addr
, MMU_DATA_LOAD
, &raddr
, &s
, &p
,
1563 cpu_mmu_index(&cpu
->env
, false), false) ||
1564 ppc_xlate(cpu
, addr
, MMU_INST_FETCH
, &raddr
, &s
, &p
,
1565 cpu_mmu_index(&cpu
->env
, true), false)) {
1566 return raddr
& TARGET_PAGE_MASK
;