2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
23 #include "sysemu/kvm.h"
25 #include "mmu-hash64.h"
26 #include "mmu-hash32.h"
27 #include "exec/exec-all.h"
29 #include "helper_regs.h"
30 #include "qemu/error-report.h"
31 #include "qemu/main-loop.h"
32 #include "qemu/qemu-print.h"
34 #include "mmu-book3s-v3.h"
35 #include "mmu-radix64.h"
37 /* #define DUMP_PAGE_TABLES */
39 void ppc_store_sdr1(CPUPPCState
*env
, target_ulong value
)
41 PowerPCCPU
*cpu
= env_archcpu(env
);
42 qemu_log_mask(CPU_LOG_MMU
, "%s: " TARGET_FMT_lx
"\n", __func__
, value
);
43 assert(!cpu
->env
.has_hv_mode
|| !cpu
->vhyp
);
44 #if defined(TARGET_PPC64)
45 if (mmu_is_64bit(env
->mmu_model
)) {
46 target_ulong sdr_mask
= SDR_64_HTABORG
| SDR_64_HTABSIZE
;
47 target_ulong htabsize
= value
& SDR_64_HTABSIZE
;
49 if (value
& ~sdr_mask
) {
50 qemu_log_mask(LOG_GUEST_ERROR
, "Invalid bits 0x"TARGET_FMT_lx
51 " set in SDR1", value
& ~sdr_mask
);
55 qemu_log_mask(LOG_GUEST_ERROR
, "Invalid HTABSIZE 0x" TARGET_FMT_lx
56 " stored in SDR1", htabsize
);
60 #endif /* defined(TARGET_PPC64) */
61 /* FIXME: Should check for valid HTABMASK values in 32-bit case */
62 env
->spr
[SPR_SDR1
] = value
;
65 /*****************************************************************************/
66 /* PowerPC MMU emulation */
68 static int pp_check(int key
, int pp
, int nx
)
72 /* Compute access rights */
95 access
= PAGE_READ
| PAGE_WRITE
;
106 static int check_prot(int prot
, MMUAccessType access_type
)
108 return prot
& prot_for_access_type(access_type
) ? 0 : -2;
111 int ppc6xx_tlb_getnum(CPUPPCState
*env
, target_ulong eaddr
,
112 int way
, int is_code
)
116 /* Select TLB num in a way from address */
117 nr
= (eaddr
>> TARGET_PAGE_BITS
) & (env
->tlb_per_way
- 1);
119 nr
+= env
->tlb_per_way
* way
;
120 /* 6xx have separate TLBs for instructions and data */
121 if (is_code
&& env
->id_tlbs
== 1) {
128 static int ppc6xx_tlb_pte_check(mmu_ctx_t
*ctx
, target_ulong pte0
,
129 target_ulong pte1
, int h
,
130 MMUAccessType access_type
)
132 target_ulong ptem
, mmask
;
133 int access
, ret
, pteh
, ptev
, pp
;
136 /* Check validity and table match */
137 ptev
= pte_is_valid(pte0
);
138 pteh
= (pte0
>> 6) & 1;
139 if (ptev
&& h
== pteh
) {
140 /* Check vsid & api */
141 ptem
= pte0
& PTE_PTEM_MASK
;
142 mmask
= PTE_CHECK_MASK
;
143 pp
= pte1
& 0x00000003;
144 if (ptem
== ctx
->ptem
) {
145 if (ctx
->raddr
!= (hwaddr
)-1ULL) {
146 /* all matches should have equal RPN, WIMG & PP */
147 if ((ctx
->raddr
& mmask
) != (pte1
& mmask
)) {
148 qemu_log_mask(CPU_LOG_MMU
, "Bad RPN/WIMG/PP\n");
152 /* Compute access rights */
153 access
= pp_check(ctx
->key
, pp
, ctx
->nx
);
154 /* Keep the matching PTE information */
157 ret
= check_prot(ctx
->prot
, access_type
);
160 qemu_log_mask(CPU_LOG_MMU
, "PTE access granted !\n");
162 /* Access right violation */
163 qemu_log_mask(CPU_LOG_MMU
, "PTE access rejected\n");
171 static int pte_update_flags(mmu_ctx_t
*ctx
, target_ulong
*pte1p
,
172 int ret
, MMUAccessType access_type
)
176 /* Update page flags */
177 if (!(*pte1p
& 0x00000100)) {
178 /* Update accessed flag */
179 *pte1p
|= 0x00000100;
182 if (!(*pte1p
& 0x00000080)) {
183 if (access_type
== MMU_DATA_STORE
&& ret
== 0) {
184 /* Update changed flag */
185 *pte1p
|= 0x00000080;
188 /* Force page fault for first write access */
189 ctx
->prot
&= ~PAGE_WRITE
;
196 /* Software driven TLB helpers */
198 static int ppc6xx_tlb_check(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
199 target_ulong eaddr
, MMUAccessType access_type
)
206 ret
= -1; /* No TLB found */
207 for (way
= 0; way
< env
->nb_ways
; way
++) {
208 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
, access_type
== MMU_INST_FETCH
);
209 tlb
= &env
->tlb
.tlb6
[nr
];
210 /* This test "emulates" the PTE index match for hardware TLBs */
211 if ((eaddr
& TARGET_PAGE_MASK
) != tlb
->EPN
) {
212 qemu_log_mask(CPU_LOG_MMU
, "TLB %d/%d %s [" TARGET_FMT_lx
213 " " TARGET_FMT_lx
"] <> " TARGET_FMT_lx
"\n",
215 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
216 tlb
->EPN
, tlb
->EPN
+ TARGET_PAGE_SIZE
, eaddr
);
219 qemu_log_mask(CPU_LOG_MMU
, "TLB %d/%d %s " TARGET_FMT_lx
" <> "
220 TARGET_FMT_lx
" " TARGET_FMT_lx
" %c %c\n",
222 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
223 tlb
->EPN
, eaddr
, tlb
->pte1
,
224 access_type
== MMU_DATA_STORE
? 'S' : 'L',
225 access_type
== MMU_INST_FETCH
? 'I' : 'D');
226 switch (ppc6xx_tlb_pte_check(ctx
, tlb
->pte0
, tlb
->pte1
,
229 /* TLB inconsistency */
232 /* Access violation */
243 * XXX: we should go on looping to check all TLBs
244 * consistency but we can speed-up the whole thing as
245 * the result would be undefined if TLBs are not
255 qemu_log_mask(CPU_LOG_MMU
, "found TLB at addr " TARGET_FMT_plx
256 " prot=%01x ret=%d\n",
257 ctx
->raddr
& TARGET_PAGE_MASK
, ctx
->prot
, ret
);
258 /* Update page flags */
259 pte_update_flags(ctx
, &env
->tlb
.tlb6
[best
].pte1
, ret
, access_type
);
265 /* Perform BAT hit & translation */
266 static inline void bat_size_prot(CPUPPCState
*env
, target_ulong
*blp
,
267 int *validp
, int *protp
, target_ulong
*BATu
,
273 bl
= (*BATu
& 0x00001FFC) << 15;
276 if ((!FIELD_EX64(env
->msr
, MSR
, PR
) && (*BATu
& 0x00000002)) ||
277 (FIELD_EX64(env
->msr
, MSR
, PR
) && (*BATu
& 0x00000001))) {
279 pp
= *BATl
& 0x00000003;
281 prot
= PAGE_READ
| PAGE_EXEC
;
292 static int get_bat_6xx_tlb(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
293 target_ulong
virtual, MMUAccessType access_type
)
295 target_ulong
*BATlt
, *BATut
, *BATu
, *BATl
;
296 target_ulong BEPIl
, BEPIu
, bl
;
299 bool ifetch
= access_type
== MMU_INST_FETCH
;
301 qemu_log_mask(CPU_LOG_MMU
, "%s: %cBAT v " TARGET_FMT_lx
"\n", __func__
,
302 ifetch
? 'I' : 'D', virtual);
304 BATlt
= env
->IBAT
[1];
305 BATut
= env
->IBAT
[0];
307 BATlt
= env
->DBAT
[1];
308 BATut
= env
->DBAT
[0];
310 for (i
= 0; i
< env
->nb_BATs
; i
++) {
313 BEPIu
= *BATu
& 0xF0000000;
314 BEPIl
= *BATu
& 0x0FFE0000;
315 bat_size_prot(env
, &bl
, &valid
, &prot
, BATu
, BATl
);
316 qemu_log_mask(CPU_LOG_MMU
, "%s: %cBAT%d v " TARGET_FMT_lx
" BATu "
317 TARGET_FMT_lx
" BATl " TARGET_FMT_lx
"\n", __func__
,
318 ifetch
? 'I' : 'D', i
, virtual, *BATu
, *BATl
);
319 if ((virtual & 0xF0000000) == BEPIu
&&
320 ((virtual & 0x0FFE0000) & ~bl
) == BEPIl
) {
323 /* Get physical address */
324 ctx
->raddr
= (*BATl
& 0xF0000000) |
325 ((virtual & 0x0FFE0000 & bl
) | (*BATl
& 0x0FFE0000)) |
326 (virtual & 0x0001F000);
327 /* Compute access rights */
329 ret
= check_prot(ctx
->prot
, access_type
);
331 qemu_log_mask(CPU_LOG_MMU
, "BAT %d match: r " TARGET_FMT_plx
332 " prot=%c%c\n", i
, ctx
->raddr
,
333 ctx
->prot
& PAGE_READ
? 'R' : '-',
334 ctx
->prot
& PAGE_WRITE
? 'W' : '-');
341 if (qemu_log_enabled()) {
342 qemu_log_mask(CPU_LOG_MMU
, "no BAT match for "
343 TARGET_FMT_lx
":\n", virtual);
344 for (i
= 0; i
< 4; i
++) {
347 BEPIu
= *BATu
& 0xF0000000;
348 BEPIl
= *BATu
& 0x0FFE0000;
349 bl
= (*BATu
& 0x00001FFC) << 15;
350 qemu_log_mask(CPU_LOG_MMU
, "%s: %cBAT%d v "
351 TARGET_FMT_lx
" BATu " TARGET_FMT_lx
352 " BATl " TARGET_FMT_lx
"\n\t" TARGET_FMT_lx
" "
353 TARGET_FMT_lx
" " TARGET_FMT_lx
"\n",
354 __func__
, ifetch
? 'I' : 'D', i
, virtual,
355 *BATu
, *BATl
, BEPIu
, BEPIl
, bl
);
363 /* Perform segment based translation */
364 static int get_segment_6xx_tlb(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
365 target_ulong eaddr
, MMUAccessType access_type
,
368 PowerPCCPU
*cpu
= env_archcpu(env
);
371 int ds
, target_page_bits
;
374 target_ulong sr
, pgidx
;
376 pr
= FIELD_EX64(env
->msr
, MSR
, PR
);
379 sr
= env
->sr
[eaddr
>> 28];
380 ctx
->key
= (((sr
& 0x20000000) && pr
) ||
381 ((sr
& 0x40000000) && !pr
)) ? 1 : 0;
382 ds
= sr
& 0x80000000 ? 1 : 0;
383 ctx
->nx
= sr
& 0x10000000 ? 1 : 0;
384 vsid
= sr
& 0x00FFFFFF;
385 target_page_bits
= TARGET_PAGE_BITS
;
386 qemu_log_mask(CPU_LOG_MMU
,
387 "Check segment v=" TARGET_FMT_lx
" %d " TARGET_FMT_lx
388 " nip=" TARGET_FMT_lx
" lr=" TARGET_FMT_lx
389 " ir=%d dr=%d pr=%d %d t=%d\n",
390 eaddr
, (int)(eaddr
>> 28), sr
, env
->nip
, env
->lr
,
391 (int)FIELD_EX64(env
->msr
, MSR
, IR
),
392 (int)FIELD_EX64(env
->msr
, MSR
, DR
), pr
? 1 : 0,
393 access_type
== MMU_DATA_STORE
, type
);
394 pgidx
= (eaddr
& ~SEGMENT_MASK_256M
) >> target_page_bits
;
396 ctx
->ptem
= (vsid
<< 7) | (pgidx
>> 10);
398 qemu_log_mask(CPU_LOG_MMU
,
399 "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx
"\n",
400 ctx
->key
, ds
, ctx
->nx
, vsid
);
403 /* Check if instruction fetch is allowed, if needed */
404 if (type
!= ACCESS_CODE
|| ctx
->nx
== 0) {
405 /* Page address translation */
406 qemu_log_mask(CPU_LOG_MMU
, "htab_base " TARGET_FMT_plx
407 " htab_mask " TARGET_FMT_plx
408 " hash " TARGET_FMT_plx
"\n",
409 ppc_hash32_hpt_base(cpu
), ppc_hash32_hpt_mask(cpu
), hash
);
411 ctx
->hash
[1] = ~hash
;
413 /* Initialize real address with an invalid value */
414 ctx
->raddr
= (hwaddr
)-1ULL;
415 /* Software TLB search */
416 ret
= ppc6xx_tlb_check(env
, ctx
, eaddr
, access_type
);
417 #if defined(DUMP_PAGE_TABLES)
418 if (qemu_loglevel_mask(CPU_LOG_MMU
)) {
419 CPUState
*cs
= env_cpu(env
);
421 uint32_t a0
, a1
, a2
, a3
;
423 qemu_log("Page table: " TARGET_FMT_plx
" len " TARGET_FMT_plx
424 "\n", ppc_hash32_hpt_base(cpu
),
425 ppc_hash32_hpt_mask(cpu
) + 0x80);
426 for (curaddr
= ppc_hash32_hpt_base(cpu
);
427 curaddr
< (ppc_hash32_hpt_base(cpu
)
428 + ppc_hash32_hpt_mask(cpu
) + 0x80);
430 a0
= ldl_phys(cs
->as
, curaddr
);
431 a1
= ldl_phys(cs
->as
, curaddr
+ 4);
432 a2
= ldl_phys(cs
->as
, curaddr
+ 8);
433 a3
= ldl_phys(cs
->as
, curaddr
+ 12);
434 if (a0
!= 0 || a1
!= 0 || a2
!= 0 || a3
!= 0) {
435 qemu_log(TARGET_FMT_plx
": %08x %08x %08x %08x\n",
436 curaddr
, a0
, a1
, a2
, a3
);
442 qemu_log_mask(CPU_LOG_MMU
, "No access allowed\n");
446 qemu_log_mask(CPU_LOG_MMU
, "direct store...\n");
447 /* Direct-store segment : absolutely *BUGGY* for now */
451 /* Integer load/store : only access allowed */
454 /* No code fetch is allowed in direct-store areas */
457 /* Floating point load/store */
460 /* lwarx, ldarx or srwcx. */
464 * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
466 * Should make the instruction do no-op. As it already do
467 * no-op, it's quite easy :-)
475 qemu_log_mask(CPU_LOG_MMU
, "ERROR: instruction should not need "
476 "address translation\n");
479 if ((access_type
== MMU_DATA_STORE
|| ctx
->key
!= 1) &&
480 (access_type
== MMU_DATA_LOAD
|| ctx
->key
!= 0)) {
491 /* Generic TLB check function for embedded PowerPC implementations */
492 int ppcemb_tlb_check(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
494 target_ulong address
, uint32_t pid
, int ext
,
499 /* Check valid flag */
500 if (!(tlb
->prot
& PAGE_VALID
)) {
503 mask
= ~(tlb
->size
- 1);
504 qemu_log_mask(CPU_LOG_MMU
, "%s: TLB %d address " TARGET_FMT_lx
505 " PID %u <=> " TARGET_FMT_lx
" " TARGET_FMT_lx
" %u %x\n",
506 __func__
, i
, address
, pid
, tlb
->EPN
,
507 mask
, (uint32_t)tlb
->PID
, tlb
->prot
);
509 if (tlb
->PID
!= 0 && tlb
->PID
!= pid
) {
512 /* Check effective address */
513 if ((address
& mask
) != tlb
->EPN
) {
516 *raddrp
= (tlb
->RPN
& mask
) | (address
& ~mask
);
518 /* Extend the physical address to 36 bits */
519 *raddrp
|= (uint64_t)(tlb
->RPN
& 0xF) << 32;
525 static int mmu40x_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
526 target_ulong address
,
527 MMUAccessType access_type
)
531 int i
, ret
, zsel
, zpr
, pr
;
534 raddr
= (hwaddr
)-1ULL;
535 pr
= FIELD_EX64(env
->msr
, MSR
, PR
);
536 for (i
= 0; i
< env
->nb_tlb
; i
++) {
537 tlb
= &env
->tlb
.tlbe
[i
];
538 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
,
539 env
->spr
[SPR_40x_PID
], 0, i
) < 0) {
542 zsel
= (tlb
->attr
>> 4) & 0xF;
543 zpr
= (env
->spr
[SPR_40x_ZPR
] >> (30 - (2 * zsel
))) & 0x3;
544 qemu_log_mask(CPU_LOG_MMU
,
545 "%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
546 __func__
, i
, zsel
, zpr
, access_type
, tlb
->attr
);
547 /* Check execute enable bit */
555 /* All accesses granted */
556 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
561 /* Raise Zone protection fault. */
562 env
->spr
[SPR_40x_ESR
] = 1 << 22;
570 /* Check from TLB entry */
571 ctx
->prot
= tlb
->prot
;
572 ret
= check_prot(ctx
->prot
, access_type
);
574 env
->spr
[SPR_40x_ESR
] = 0;
580 qemu_log_mask(CPU_LOG_MMU
, "%s: access granted " TARGET_FMT_lx
581 " => " TARGET_FMT_plx
582 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
587 qemu_log_mask(CPU_LOG_MMU
, "%s: access refused " TARGET_FMT_lx
588 " => " TARGET_FMT_plx
589 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
594 static int mmubooke_check_tlb(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
595 hwaddr
*raddr
, int *prot
, target_ulong address
,
596 MMUAccessType access_type
, int i
)
600 if (ppcemb_tlb_check(env
, tlb
, raddr
, address
,
601 env
->spr
[SPR_BOOKE_PID
],
602 !env
->nb_pids
, i
) >= 0) {
606 if (env
->spr
[SPR_BOOKE_PID1
] &&
607 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
608 env
->spr
[SPR_BOOKE_PID1
], 0, i
) >= 0) {
612 if (env
->spr
[SPR_BOOKE_PID2
] &&
613 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
614 env
->spr
[SPR_BOOKE_PID2
], 0, i
) >= 0) {
618 qemu_log_mask(CPU_LOG_MMU
, "%s: TLB entry not found\n", __func__
);
623 if (FIELD_EX64(env
->msr
, MSR
, PR
)) {
624 prot2
= tlb
->prot
& 0xF;
626 prot2
= (tlb
->prot
>> 4) & 0xF;
629 /* Check the address space */
630 if ((access_type
== MMU_INST_FETCH
?
631 FIELD_EX64(env
->msr
, MSR
, IR
) :
632 FIELD_EX64(env
->msr
, MSR
, DR
)) != (tlb
->attr
& 1)) {
633 qemu_log_mask(CPU_LOG_MMU
, "%s: AS doesn't match\n", __func__
);
638 if (prot2
& prot_for_access_type(access_type
)) {
639 qemu_log_mask(CPU_LOG_MMU
, "%s: good TLB!\n", __func__
);
643 qemu_log_mask(CPU_LOG_MMU
, "%s: no prot match: %x\n", __func__
, prot2
);
644 return access_type
== MMU_INST_FETCH
? -3 : -2;
647 static int mmubooke_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
648 target_ulong address
,
649 MMUAccessType access_type
)
656 raddr
= (hwaddr
)-1ULL;
657 for (i
= 0; i
< env
->nb_tlb
; i
++) {
658 tlb
= &env
->tlb
.tlbe
[i
];
659 ret
= mmubooke_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
,
668 qemu_log_mask(CPU_LOG_MMU
, "%s: access granted " TARGET_FMT_lx
669 " => " TARGET_FMT_plx
" %d %d\n", __func__
,
670 address
, ctx
->raddr
, ctx
->prot
, ret
);
672 qemu_log_mask(CPU_LOG_MMU
, "%s: access refused " TARGET_FMT_lx
673 " => " TARGET_FMT_plx
" %d %d\n", __func__
,
674 address
, raddr
, ctx
->prot
, ret
);
680 hwaddr
booke206_tlb_to_page_size(CPUPPCState
*env
,
685 tlbm_size
= (tlb
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
687 return 1024ULL << tlbm_size
;
690 /* TLB check function for MAS based SoftTLBs */
691 int ppcmas_tlb_check(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
692 hwaddr
*raddrp
, target_ulong address
,
698 if (!FIELD_EX64(env
->msr
, MSR
, CM
)) {
699 /* In 32bit mode we can only address 32bit EAs */
700 address
= (uint32_t)address
;
703 /* Check valid flag */
704 if (!(tlb
->mas1
& MAS1_VALID
)) {
708 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
709 qemu_log_mask(CPU_LOG_MMU
, "%s: TLB ADDR=0x" TARGET_FMT_lx
710 " PID=0x%x MAS1=0x%x MAS2=0x%" PRIx64
" mask=0x%"
711 HWADDR_PRIx
" MAS7_3=0x%" PRIx64
" MAS8=0x%" PRIx32
"\n",
712 __func__
, address
, pid
, tlb
->mas1
, tlb
->mas2
, mask
,
713 tlb
->mas7_3
, tlb
->mas8
);
716 tlb_pid
= (tlb
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
;
717 if (tlb_pid
!= 0 && tlb_pid
!= pid
) {
721 /* Check effective address */
722 if ((address
& mask
) != (tlb
->mas2
& MAS2_EPN_MASK
)) {
727 *raddrp
= (tlb
->mas7_3
& mask
) | (address
& ~mask
);
733 static bool is_epid_mmu(int mmu_idx
)
735 return mmu_idx
== PPC_TLB_EPID_STORE
|| mmu_idx
== PPC_TLB_EPID_LOAD
;
738 static uint32_t mmubooke206_esr(int mmu_idx
, MMUAccessType access_type
)
741 if (access_type
== MMU_DATA_STORE
) {
744 if (is_epid_mmu(mmu_idx
)) {
751 * Get EPID register given the mmu_idx. If this is regular load,
752 * construct the EPID access bits from current processor state
754 * Get the effective AS and PR bits and the PID. The PID is returned
755 * only if EPID load is requested, otherwise the caller must detect
756 * the correct EPID. Return true if valid EPID is returned.
758 static bool mmubooke206_get_as(CPUPPCState
*env
,
759 int mmu_idx
, uint32_t *epid_out
,
760 bool *as_out
, bool *pr_out
)
762 if (is_epid_mmu(mmu_idx
)) {
764 if (mmu_idx
== PPC_TLB_EPID_STORE
) {
765 epidr
= env
->spr
[SPR_BOOKE_EPSC
];
767 epidr
= env
->spr
[SPR_BOOKE_EPLC
];
769 *epid_out
= (epidr
& EPID_EPID
) >> EPID_EPID_SHIFT
;
770 *as_out
= !!(epidr
& EPID_EAS
);
771 *pr_out
= !!(epidr
& EPID_EPR
);
774 *as_out
= FIELD_EX64(env
->msr
, MSR
, DS
);
775 *pr_out
= FIELD_EX64(env
->msr
, MSR
, PR
);
780 /* Check if the tlb found by hashing really matches */
781 static int mmubooke206_check_tlb(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
782 hwaddr
*raddr
, int *prot
,
783 target_ulong address
,
784 MMUAccessType access_type
, int mmu_idx
)
789 bool use_epid
= mmubooke206_get_as(env
, mmu_idx
, &epid
, &as
, &pr
);
792 if (ppcmas_tlb_check(env
, tlb
, raddr
, address
,
793 env
->spr
[SPR_BOOKE_PID
]) >= 0) {
797 if (env
->spr
[SPR_BOOKE_PID1
] &&
798 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
799 env
->spr
[SPR_BOOKE_PID1
]) >= 0) {
803 if (env
->spr
[SPR_BOOKE_PID2
] &&
804 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
805 env
->spr
[SPR_BOOKE_PID2
]) >= 0) {
809 if (ppcmas_tlb_check(env
, tlb
, raddr
, address
, epid
) >= 0) {
814 qemu_log_mask(CPU_LOG_MMU
, "%s: TLB entry not found\n", __func__
);
820 if (tlb
->mas7_3
& MAS3_UR
) {
823 if (tlb
->mas7_3
& MAS3_UW
) {
826 if (tlb
->mas7_3
& MAS3_UX
) {
830 if (tlb
->mas7_3
& MAS3_SR
) {
833 if (tlb
->mas7_3
& MAS3_SW
) {
836 if (tlb
->mas7_3
& MAS3_SX
) {
841 /* Check the address space and permissions */
842 if (access_type
== MMU_INST_FETCH
) {
843 /* There is no way to fetch code using epid load */
845 as
= FIELD_EX64(env
->msr
, MSR
, IR
);
848 if (as
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
849 qemu_log_mask(CPU_LOG_MMU
, "%s: AS doesn't match\n", __func__
);
854 if (prot2
& prot_for_access_type(access_type
)) {
855 qemu_log_mask(CPU_LOG_MMU
, "%s: good TLB!\n", __func__
);
859 qemu_log_mask(CPU_LOG_MMU
, "%s: no prot match: %x\n", __func__
, prot2
);
860 return access_type
== MMU_INST_FETCH
? -3 : -2;
863 static int mmubooke206_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
864 target_ulong address
,
865 MMUAccessType access_type
,
873 raddr
= (hwaddr
)-1ULL;
875 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
876 int ways
= booke206_tlb_ways(env
, i
);
878 for (j
= 0; j
< ways
; j
++) {
879 tlb
= booke206_get_tlbm(env
, i
, address
, j
);
883 ret
= mmubooke206_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
,
884 access_type
, mmu_idx
);
895 qemu_log_mask(CPU_LOG_MMU
, "%s: access granted " TARGET_FMT_lx
896 " => " TARGET_FMT_plx
" %d %d\n", __func__
, address
,
897 ctx
->raddr
, ctx
->prot
, ret
);
899 qemu_log_mask(CPU_LOG_MMU
, "%s: access refused " TARGET_FMT_lx
900 " => " TARGET_FMT_plx
" %d %d\n", __func__
, address
,
901 raddr
, ctx
->prot
, ret
);
907 static const char *book3e_tsize_to_str
[32] = {
908 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
909 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
910 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
914 static void mmubooke_dump_mmu(CPUPPCState
*env
)
919 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
920 qemu_printf("Cannot access KVM TLB\n");
924 qemu_printf("\nTLB:\n");
925 qemu_printf("Effective Physical Size PID Prot "
928 entry
= &env
->tlb
.tlbe
[0];
929 for (i
= 0; i
< env
->nb_tlb
; i
++, entry
++) {
932 uint64_t size
= (uint64_t)entry
->size
;
935 /* Check valid flag */
936 if (!(entry
->prot
& PAGE_VALID
)) {
940 mask
= ~(entry
->size
- 1);
941 ea
= entry
->EPN
& mask
;
942 pa
= entry
->RPN
& mask
;
943 /* Extend the physical address to 36 bits */
944 pa
|= (hwaddr
)(entry
->RPN
& 0xF) << 32;
945 if (size
>= 1 * MiB
) {
946 snprintf(size_buf
, sizeof(size_buf
), "%3" PRId64
"M", size
/ MiB
);
948 snprintf(size_buf
, sizeof(size_buf
), "%3" PRId64
"k", size
/ KiB
);
950 qemu_printf("0x%016" PRIx64
" 0x%016" PRIx64
" %s %-5u %08x %08x\n",
951 (uint64_t)ea
, (uint64_t)pa
, size_buf
, (uint32_t)entry
->PID
,
952 entry
->prot
, entry
->attr
);
957 static void mmubooke206_dump_one_tlb(CPUPPCState
*env
, int tlbn
, int offset
,
963 qemu_printf("\nTLB%d:\n", tlbn
);
964 qemu_printf("Effective Physical Size TID TS SRWX"
965 " URWX WIMGE U0123\n");
967 entry
= &env
->tlb
.tlbm
[offset
];
968 for (i
= 0; i
< tlbsize
; i
++, entry
++) {
972 if (!(entry
->mas1
& MAS1_VALID
)) {
976 tsize
= (entry
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
977 size
= 1024ULL << tsize
;
978 ea
= entry
->mas2
& ~(size
- 1);
979 pa
= entry
->mas7_3
& ~(size
- 1);
981 qemu_printf("0x%016" PRIx64
" 0x%016" PRIx64
" %4s %-5u %1u S%c%c%c"
982 "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
983 (uint64_t)ea
, (uint64_t)pa
,
984 book3e_tsize_to_str
[tsize
],
985 (entry
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
,
986 (entry
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
,
987 entry
->mas7_3
& MAS3_SR
? 'R' : '-',
988 entry
->mas7_3
& MAS3_SW
? 'W' : '-',
989 entry
->mas7_3
& MAS3_SX
? 'X' : '-',
990 entry
->mas7_3
& MAS3_UR
? 'R' : '-',
991 entry
->mas7_3
& MAS3_UW
? 'W' : '-',
992 entry
->mas7_3
& MAS3_UX
? 'X' : '-',
993 entry
->mas2
& MAS2_W
? 'W' : '-',
994 entry
->mas2
& MAS2_I
? 'I' : '-',
995 entry
->mas2
& MAS2_M
? 'M' : '-',
996 entry
->mas2
& MAS2_G
? 'G' : '-',
997 entry
->mas2
& MAS2_E
? 'E' : '-',
998 entry
->mas7_3
& MAS3_U0
? '0' : '-',
999 entry
->mas7_3
& MAS3_U1
? '1' : '-',
1000 entry
->mas7_3
& MAS3_U2
? '2' : '-',
1001 entry
->mas7_3
& MAS3_U3
? '3' : '-');
1005 static void mmubooke206_dump_mmu(CPUPPCState
*env
)
1010 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
1011 qemu_printf("Cannot access KVM TLB\n");
1015 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1016 int size
= booke206_tlb_size(env
, i
);
1022 mmubooke206_dump_one_tlb(env
, i
, offset
, size
);
1027 static void mmu6xx_dump_BATs(CPUPPCState
*env
, int type
)
1029 target_ulong
*BATlt
, *BATut
, *BATu
, *BATl
;
1030 target_ulong BEPIl
, BEPIu
, bl
;
1035 BATlt
= env
->IBAT
[1];
1036 BATut
= env
->IBAT
[0];
1039 BATlt
= env
->DBAT
[1];
1040 BATut
= env
->DBAT
[0];
1044 for (i
= 0; i
< env
->nb_BATs
; i
++) {
1047 BEPIu
= *BATu
& 0xF0000000;
1048 BEPIl
= *BATu
& 0x0FFE0000;
1049 bl
= (*BATu
& 0x00001FFC) << 15;
1050 qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
1051 " BATl " TARGET_FMT_lx
"\n\t" TARGET_FMT_lx
" "
1052 TARGET_FMT_lx
" " TARGET_FMT_lx
"\n",
1053 type
== ACCESS_CODE
? "code" : "data", i
,
1054 *BATu
, *BATl
, BEPIu
, BEPIl
, bl
);
1058 static void mmu6xx_dump_mmu(CPUPPCState
*env
)
1060 PowerPCCPU
*cpu
= env_archcpu(env
);
1063 int type
, way
, entry
, i
;
1065 qemu_printf("HTAB base = 0x%"HWADDR_PRIx
"\n", ppc_hash32_hpt_base(cpu
));
1066 qemu_printf("HTAB mask = 0x%"HWADDR_PRIx
"\n", ppc_hash32_hpt_mask(cpu
));
1068 qemu_printf("\nSegment registers:\n");
1069 for (i
= 0; i
< 32; i
++) {
1071 if (sr
& 0x80000000) {
1072 qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
1073 "CNTLR_SPEC=0x%05x\n", i
,
1074 sr
& 0x80000000 ? 1 : 0, sr
& 0x40000000 ? 1 : 0,
1075 sr
& 0x20000000 ? 1 : 0, (uint32_t)((sr
>> 20) & 0x1FF),
1076 (uint32_t)(sr
& 0xFFFFF));
1078 qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i
,
1079 sr
& 0x80000000 ? 1 : 0, sr
& 0x40000000 ? 1 : 0,
1080 sr
& 0x20000000 ? 1 : 0, sr
& 0x10000000 ? 1 : 0,
1081 (uint32_t)(sr
& 0x00FFFFFF));
1085 qemu_printf("\nBATs:\n");
1086 mmu6xx_dump_BATs(env
, ACCESS_INT
);
1087 mmu6xx_dump_BATs(env
, ACCESS_CODE
);
1089 if (env
->id_tlbs
!= 1) {
1090 qemu_printf("ERROR: 6xx MMU should have separated TLB"
1091 " for code and data\n");
1094 qemu_printf("\nTLBs [EPN EPN + SIZE]\n");
1096 for (type
= 0; type
< 2; type
++) {
1097 for (way
= 0; way
< env
->nb_ways
; way
++) {
1098 for (entry
= env
->nb_tlb
* type
+ env
->tlb_per_way
* way
;
1099 entry
< (env
->nb_tlb
* type
+ env
->tlb_per_way
* (way
+ 1));
1102 tlb
= &env
->tlb
.tlb6
[entry
];
1103 qemu_printf("%s TLB %02d/%02d way:%d %s ["
1104 TARGET_FMT_lx
" " TARGET_FMT_lx
"]\n",
1105 type
? "code" : "data", entry
% env
->nb_tlb
,
1107 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
1108 tlb
->EPN
, tlb
->EPN
+ TARGET_PAGE_SIZE
);
1114 void dump_mmu(CPUPPCState
*env
)
1116 switch (env
->mmu_model
) {
1117 case POWERPC_MMU_BOOKE
:
1118 mmubooke_dump_mmu(env
);
1120 case POWERPC_MMU_BOOKE206
:
1121 mmubooke206_dump_mmu(env
);
1123 case POWERPC_MMU_SOFT_6xx
:
1124 mmu6xx_dump_mmu(env
);
1126 #if defined(TARGET_PPC64)
1127 case POWERPC_MMU_64B
:
1128 case POWERPC_MMU_2_03
:
1129 case POWERPC_MMU_2_06
:
1130 case POWERPC_MMU_2_07
:
1131 dump_slb(env_archcpu(env
));
1133 case POWERPC_MMU_3_00
:
1134 if (ppc64_v3_radix(env_archcpu(env
))) {
1135 qemu_log_mask(LOG_UNIMP
, "%s: the PPC64 MMU is unsupported\n",
1138 dump_slb(env_archcpu(env
));
1143 qemu_log_mask(LOG_UNIMP
, "%s: unimplemented\n", __func__
);
1147 static int check_physical(CPUPPCState
*env
, mmu_ctx_t
*ctx
, target_ulong eaddr
,
1148 MMUAccessType access_type
)
1151 ctx
->prot
= PAGE_READ
| PAGE_EXEC
;
1153 switch (env
->mmu_model
) {
1154 case POWERPC_MMU_SOFT_6xx
:
1155 case POWERPC_MMU_SOFT_4xx
:
1156 case POWERPC_MMU_REAL
:
1157 case POWERPC_MMU_BOOKE
:
1158 ctx
->prot
|= PAGE_WRITE
;
1162 /* Caller's checks mean we should never get here for other models */
1163 g_assert_not_reached();
1169 int get_physical_address_wtlb(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1171 MMUAccessType access_type
, int type
,
1175 bool real_mode
= (type
== ACCESS_CODE
&& !FIELD_EX64(env
->msr
, MSR
, IR
)) ||
1176 (type
!= ACCESS_CODE
&& !FIELD_EX64(env
->msr
, MSR
, DR
));
1178 switch (env
->mmu_model
) {
1179 case POWERPC_MMU_SOFT_6xx
:
1181 ret
= check_physical(env
, ctx
, eaddr
, access_type
);
1183 /* Try to find a BAT */
1184 if (env
->nb_BATs
!= 0) {
1185 ret
= get_bat_6xx_tlb(env
, ctx
, eaddr
, access_type
);
1188 /* We didn't match any BAT entry or don't have BATs */
1189 ret
= get_segment_6xx_tlb(env
, ctx
, eaddr
, access_type
, type
);
1194 case POWERPC_MMU_SOFT_4xx
:
1196 ret
= check_physical(env
, ctx
, eaddr
, access_type
);
1198 ret
= mmu40x_get_physical_address(env
, ctx
, eaddr
, access_type
);
1201 case POWERPC_MMU_BOOKE
:
1202 ret
= mmubooke_get_physical_address(env
, ctx
, eaddr
, access_type
);
1204 case POWERPC_MMU_BOOKE206
:
1205 ret
= mmubooke206_get_physical_address(env
, ctx
, eaddr
, access_type
,
1208 case POWERPC_MMU_MPC8xx
:
1210 cpu_abort(env_cpu(env
), "MPC8xx MMU model is not implemented\n");
1212 case POWERPC_MMU_REAL
:
1214 ret
= check_physical(env
, ctx
, eaddr
, access_type
);
1216 cpu_abort(env_cpu(env
),
1217 "PowerPC in real mode do not do any translation\n");
1221 cpu_abort(env_cpu(env
), "Unknown or invalid MMU model\n");
1228 static void booke206_update_mas_tlb_miss(CPUPPCState
*env
, target_ulong address
,
1229 MMUAccessType access_type
, int mmu_idx
)
1233 uint32_t missed_tid
= 0;
1234 bool use_epid
= mmubooke206_get_as(env
, mmu_idx
, &epid
, &as
, &pr
);
1236 if (access_type
== MMU_INST_FETCH
) {
1237 as
= FIELD_EX64(env
->msr
, MSR
, IR
);
1239 env
->spr
[SPR_BOOKE_MAS0
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TLBSELD_MASK
;
1240 env
->spr
[SPR_BOOKE_MAS1
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TSIZED_MASK
;
1241 env
->spr
[SPR_BOOKE_MAS2
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_WIMGED_MASK
;
1242 env
->spr
[SPR_BOOKE_MAS3
] = 0;
1243 env
->spr
[SPR_BOOKE_MAS6
] = 0;
1244 env
->spr
[SPR_BOOKE_MAS7
] = 0;
1248 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_TS
;
1249 env
->spr
[SPR_BOOKE_MAS6
] |= MAS6_SAS
;
1252 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_VALID
;
1253 env
->spr
[SPR_BOOKE_MAS2
] |= address
& MAS2_EPN_MASK
;
1256 switch (env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TIDSELD_PIDZ
) {
1257 case MAS4_TIDSELD_PID0
:
1258 missed_tid
= env
->spr
[SPR_BOOKE_PID
];
1260 case MAS4_TIDSELD_PID1
:
1261 missed_tid
= env
->spr
[SPR_BOOKE_PID1
];
1263 case MAS4_TIDSELD_PID2
:
1264 missed_tid
= env
->spr
[SPR_BOOKE_PID2
];
1267 env
->spr
[SPR_BOOKE_MAS6
] |= env
->spr
[SPR_BOOKE_PID
] << 16;
1270 env
->spr
[SPR_BOOKE_MAS6
] |= missed_tid
<< 16;
1272 env
->spr
[SPR_BOOKE_MAS1
] |= (missed_tid
<< MAS1_TID_SHIFT
);
1275 /* next victim logic */
1276 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_ESEL_SHIFT
;
1278 env
->last_way
&= booke206_tlb_ways(env
, 0) - 1;
1279 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
1282 /* Perform address translation */
1283 /* TODO: Split this by mmu_model. */
1284 static bool ppc_jumbo_xlate(PowerPCCPU
*cpu
, vaddr eaddr
,
1285 MMUAccessType access_type
,
1286 hwaddr
*raddrp
, int *psizep
, int *protp
,
1287 int mmu_idx
, bool guest_visible
)
1289 CPUState
*cs
= CPU(cpu
);
1290 CPUPPCState
*env
= &cpu
->env
;
1295 if (access_type
== MMU_INST_FETCH
) {
1298 } else if (guest_visible
) {
1300 type
= env
->access_type
;
1305 ret
= get_physical_address_wtlb(env
, &ctx
, eaddr
, access_type
,
1308 *raddrp
= ctx
.raddr
;
1310 *psizep
= TARGET_PAGE_BITS
;
1314 if (guest_visible
) {
1315 log_cpu_state_mask(CPU_LOG_MMU
, cs
, 0);
1316 if (type
== ACCESS_CODE
) {
1319 /* No matches in page tables or TLB */
1320 switch (env
->mmu_model
) {
1321 case POWERPC_MMU_SOFT_6xx
:
1322 cs
->exception_index
= POWERPC_EXCP_IFTLB
;
1323 env
->error_code
= 1 << 18;
1324 env
->spr
[SPR_IMISS
] = eaddr
;
1325 env
->spr
[SPR_ICMP
] = 0x80000000 | ctx
.ptem
;
1327 case POWERPC_MMU_SOFT_4xx
:
1328 cs
->exception_index
= POWERPC_EXCP_ITLB
;
1329 env
->error_code
= 0;
1330 env
->spr
[SPR_40x_DEAR
] = eaddr
;
1331 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1333 case POWERPC_MMU_BOOKE206
:
1334 booke206_update_mas_tlb_miss(env
, eaddr
, 2, mmu_idx
);
1336 case POWERPC_MMU_BOOKE
:
1337 cs
->exception_index
= POWERPC_EXCP_ITLB
;
1338 env
->error_code
= 0;
1339 env
->spr
[SPR_BOOKE_DEAR
] = eaddr
;
1340 env
->spr
[SPR_BOOKE_ESR
] = mmubooke206_esr(mmu_idx
, MMU_DATA_LOAD
);
1342 case POWERPC_MMU_MPC8xx
:
1343 cpu_abort(cs
, "MPC8xx MMU model is not implemented\n");
1344 case POWERPC_MMU_REAL
:
1345 cpu_abort(cs
, "PowerPC in real mode should never raise "
1346 "any MMU exceptions\n");
1348 cpu_abort(cs
, "Unknown or invalid MMU model\n");
1352 /* Access rights violation */
1353 cs
->exception_index
= POWERPC_EXCP_ISI
;
1354 if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1355 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1356 env
->error_code
= 0;
1358 env
->error_code
= 0x08000000;
1362 /* No execute protection violation */
1363 if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1364 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1365 env
->spr
[SPR_BOOKE_ESR
] = 0x00000000;
1366 env
->error_code
= 0;
1368 env
->error_code
= 0x10000000;
1370 cs
->exception_index
= POWERPC_EXCP_ISI
;
1373 /* Direct store exception */
1374 /* No code fetch is allowed in direct-store areas */
1375 cs
->exception_index
= POWERPC_EXCP_ISI
;
1376 if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1377 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1378 env
->error_code
= 0;
1380 env
->error_code
= 0x10000000;
1387 /* No matches in page tables or TLB */
1388 switch (env
->mmu_model
) {
1389 case POWERPC_MMU_SOFT_6xx
:
1390 if (access_type
== MMU_DATA_STORE
) {
1391 cs
->exception_index
= POWERPC_EXCP_DSTLB
;
1392 env
->error_code
= 1 << 16;
1394 cs
->exception_index
= POWERPC_EXCP_DLTLB
;
1395 env
->error_code
= 0;
1397 env
->spr
[SPR_DMISS
] = eaddr
;
1398 env
->spr
[SPR_DCMP
] = 0x80000000 | ctx
.ptem
;
1400 env
->error_code
|= ctx
.key
<< 19;
1401 env
->spr
[SPR_HASH1
] = ppc_hash32_hpt_base(cpu
) +
1402 get_pteg_offset32(cpu
, ctx
.hash
[0]);
1403 env
->spr
[SPR_HASH2
] = ppc_hash32_hpt_base(cpu
) +
1404 get_pteg_offset32(cpu
, ctx
.hash
[1]);
1406 case POWERPC_MMU_SOFT_4xx
:
1407 cs
->exception_index
= POWERPC_EXCP_DTLB
;
1408 env
->error_code
= 0;
1409 env
->spr
[SPR_40x_DEAR
] = eaddr
;
1410 if (access_type
== MMU_DATA_STORE
) {
1411 env
->spr
[SPR_40x_ESR
] = 0x00800000;
1413 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1416 case POWERPC_MMU_MPC8xx
:
1418 cpu_abort(cs
, "MPC8xx MMU model is not implemented\n");
1419 case POWERPC_MMU_BOOKE206
:
1420 booke206_update_mas_tlb_miss(env
, eaddr
, access_type
, mmu_idx
);
1422 case POWERPC_MMU_BOOKE
:
1423 cs
->exception_index
= POWERPC_EXCP_DTLB
;
1424 env
->error_code
= 0;
1425 env
->spr
[SPR_BOOKE_DEAR
] = eaddr
;
1426 env
->spr
[SPR_BOOKE_ESR
] = mmubooke206_esr(mmu_idx
, access_type
);
1428 case POWERPC_MMU_REAL
:
1429 cpu_abort(cs
, "PowerPC in real mode should never raise "
1430 "any MMU exceptions\n");
1432 cpu_abort(cs
, "Unknown or invalid MMU model\n");
1436 /* Access rights violation */
1437 cs
->exception_index
= POWERPC_EXCP_DSI
;
1438 env
->error_code
= 0;
1439 if (env
->mmu_model
== POWERPC_MMU_SOFT_4xx
) {
1440 env
->spr
[SPR_40x_DEAR
] = eaddr
;
1441 if (access_type
== MMU_DATA_STORE
) {
1442 env
->spr
[SPR_40x_ESR
] |= 0x00800000;
1444 } else if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1445 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1446 env
->spr
[SPR_BOOKE_DEAR
] = eaddr
;
1447 env
->spr
[SPR_BOOKE_ESR
] = mmubooke206_esr(mmu_idx
, access_type
);
1449 env
->spr
[SPR_DAR
] = eaddr
;
1450 if (access_type
== MMU_DATA_STORE
) {
1451 env
->spr
[SPR_DSISR
] = 0x0A000000;
1453 env
->spr
[SPR_DSISR
] = 0x08000000;
1458 /* Direct store exception */
1461 /* Floating point load/store */
1462 cs
->exception_index
= POWERPC_EXCP_ALIGN
;
1463 env
->error_code
= POWERPC_EXCP_ALIGN_FP
;
1464 env
->spr
[SPR_DAR
] = eaddr
;
1467 /* lwarx, ldarx or stwcx. */
1468 cs
->exception_index
= POWERPC_EXCP_DSI
;
1469 env
->error_code
= 0;
1470 env
->spr
[SPR_DAR
] = eaddr
;
1471 if (access_type
== MMU_DATA_STORE
) {
1472 env
->spr
[SPR_DSISR
] = 0x06000000;
1474 env
->spr
[SPR_DSISR
] = 0x04000000;
1478 /* eciwx or ecowx */
1479 cs
->exception_index
= POWERPC_EXCP_DSI
;
1480 env
->error_code
= 0;
1481 env
->spr
[SPR_DAR
] = eaddr
;
1482 if (access_type
== MMU_DATA_STORE
) {
1483 env
->spr
[SPR_DSISR
] = 0x06100000;
1485 env
->spr
[SPR_DSISR
] = 0x04100000;
1489 printf("DSI: invalid exception (%d)\n", ret
);
1490 cs
->exception_index
= POWERPC_EXCP_PROGRAM
;
1492 POWERPC_EXCP_INVAL
| POWERPC_EXCP_INVAL_INVAL
;
1493 env
->spr
[SPR_DAR
] = eaddr
;
/*****************************************************************************/
1505 bool ppc_xlate(PowerPCCPU
*cpu
, vaddr eaddr
, MMUAccessType access_type
,
1506 hwaddr
*raddrp
, int *psizep
, int *protp
,
1507 int mmu_idx
, bool guest_visible
)
1509 switch (cpu
->env
.mmu_model
) {
1510 #if defined(TARGET_PPC64)
1511 case POWERPC_MMU_3_00
:
1512 if (ppc64_v3_radix(cpu
)) {
1513 return ppc_radix64_xlate(cpu
, eaddr
, access_type
, raddrp
,
1514 psizep
, protp
, mmu_idx
, guest_visible
);
1517 case POWERPC_MMU_64B
:
1518 case POWERPC_MMU_2_03
:
1519 case POWERPC_MMU_2_06
:
1520 case POWERPC_MMU_2_07
:
1521 return ppc_hash64_xlate(cpu
, eaddr
, access_type
,
1522 raddrp
, psizep
, protp
, mmu_idx
, guest_visible
);
1525 case POWERPC_MMU_32B
:
1526 return ppc_hash32_xlate(cpu
, eaddr
, access_type
, raddrp
,
1527 psizep
, protp
, mmu_idx
, guest_visible
);
1530 return ppc_jumbo_xlate(cpu
, eaddr
, access_type
, raddrp
,
1531 psizep
, protp
, mmu_idx
, guest_visible
);
1535 hwaddr
ppc_cpu_get_phys_page_debug(CPUState
*cs
, vaddr addr
)
1537 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
1542 * Some MMUs have separate TLBs for code and data. If we only
1543 * try an MMU_DATA_LOAD, we may not be able to read instructions
1544 * mapped by code TLBs, so we also try a MMU_INST_FETCH.
1546 if (ppc_xlate(cpu
, addr
, MMU_DATA_LOAD
, &raddr
, &s
, &p
,
1547 cpu_mmu_index(&cpu
->env
, false), false) ||
1548 ppc_xlate(cpu
, addr
, MMU_INST_FETCH
, &raddr
, &s
, &p
,
1549 cpu_mmu_index(&cpu
->env
, true), false)) {
1550 return raddr
& TARGET_PAGE_MASK
;