2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
23 #include "exec/helper-proto.h"
24 #include "sysemu/kvm.h"
26 #include "mmu-hash64.h"
27 #include "mmu-hash32.h"
28 #include "exec/exec-all.h"
29 #include "exec/cpu_ldst.h"
31 #include "helper_regs.h"
32 #include "qemu/error-report.h"
33 #include "qemu/main-loop.h"
34 #include "qemu/qemu-print.h"
36 #include "mmu-book3s-v3.h"
37 #include "mmu-radix64.h"
39 /* #define DEBUG_MMU */
40 /* #define DEBUG_BATS */
41 /* #define DEBUG_SOFTWARE_TLB */
42 /* #define DUMP_PAGE_TABLES */
43 /* #define FLUSH_ALL_TLBS */
46 # define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
48 # define LOG_MMU_STATE(cpu) do { } while (0)
51 #ifdef DEBUG_SOFTWARE_TLB
52 # define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
54 # define LOG_SWTLB(...) do { } while (0)
58 # define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
60 # define LOG_BATS(...) do { } while (0)
63 /*****************************************************************************/
64 /* PowerPC MMU emulation */
66 /* Context used internally during MMU translations */
67 typedef struct mmu_ctx_t mmu_ctx_t
;
69 hwaddr raddr
; /* Real address */
70 hwaddr eaddr
; /* Effective address */
71 int prot
; /* Protection bits */
72 hwaddr hash
[2]; /* Pagetable hash values */
73 target_ulong ptem
; /* Virtual segment ID | API */
74 int key
; /* Access key */
75 int nx
; /* Non-execute area */
78 /* Common routines used by software and hardware TLBs emulation */
79 static inline int pte_is_valid(target_ulong pte0
)
81 return pte0
& 0x80000000 ? 1 : 0;
84 static inline void pte_invalidate(target_ulong
*pte0
)
89 #define PTE_PTEM_MASK 0x7FFFFFBF
90 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
92 static int pp_check(int key
, int pp
, int nx
)
96 /* Compute access rights */
103 access
|= PAGE_WRITE
;
119 access
= PAGE_READ
| PAGE_WRITE
;
130 static int check_prot(int prot
, MMUAccessType access_type
)
132 return prot
& prot_for_access_type(access_type
) ? 0 : -2;
135 static int ppc6xx_tlb_pte_check(mmu_ctx_t
*ctx
, target_ulong pte0
,
136 target_ulong pte1
, int h
,
137 MMUAccessType access_type
)
139 target_ulong ptem
, mmask
;
140 int access
, ret
, pteh
, ptev
, pp
;
143 /* Check validity and table match */
144 ptev
= pte_is_valid(pte0
);
145 pteh
= (pte0
>> 6) & 1;
146 if (ptev
&& h
== pteh
) {
147 /* Check vsid & api */
148 ptem
= pte0
& PTE_PTEM_MASK
;
149 mmask
= PTE_CHECK_MASK
;
150 pp
= pte1
& 0x00000003;
151 if (ptem
== ctx
->ptem
) {
152 if (ctx
->raddr
!= (hwaddr
)-1ULL) {
153 /* all matches should have equal RPN, WIMG & PP */
154 if ((ctx
->raddr
& mmask
) != (pte1
& mmask
)) {
155 qemu_log_mask(CPU_LOG_MMU
, "Bad RPN/WIMG/PP\n");
159 /* Compute access rights */
160 access
= pp_check(ctx
->key
, pp
, ctx
->nx
);
161 /* Keep the matching PTE information */
164 ret
= check_prot(ctx
->prot
, access_type
);
167 qemu_log_mask(CPU_LOG_MMU
, "PTE access granted !\n");
169 /* Access right violation */
170 qemu_log_mask(CPU_LOG_MMU
, "PTE access rejected\n");
178 static int pte_update_flags(mmu_ctx_t
*ctx
, target_ulong
*pte1p
,
179 int ret
, MMUAccessType access_type
)
183 /* Update page flags */
184 if (!(*pte1p
& 0x00000100)) {
185 /* Update accessed flag */
186 *pte1p
|= 0x00000100;
189 if (!(*pte1p
& 0x00000080)) {
190 if (access_type
== MMU_DATA_STORE
&& ret
== 0) {
191 /* Update changed flag */
192 *pte1p
|= 0x00000080;
195 /* Force page fault for first write access */
196 ctx
->prot
&= ~PAGE_WRITE
;
203 /* Software driven TLB helpers */
204 static inline int ppc6xx_tlb_getnum(CPUPPCState
*env
, target_ulong eaddr
,
205 int way
, int is_code
)
209 /* Select TLB num in a way from address */
210 nr
= (eaddr
>> TARGET_PAGE_BITS
) & (env
->tlb_per_way
- 1);
212 nr
+= env
->tlb_per_way
* way
;
213 /* 6xx have separate TLBs for instructions and data */
214 if (is_code
&& env
->id_tlbs
== 1) {
221 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState
*env
)
226 /* LOG_SWTLB("Invalidate all TLBs\n"); */
227 /* Invalidate all defined software TLB */
229 if (env
->id_tlbs
== 1) {
232 for (nr
= 0; nr
< max
; nr
++) {
233 tlb
= &env
->tlb
.tlb6
[nr
];
234 pte_invalidate(&tlb
->pte0
);
236 tlb_flush(env_cpu(env
));
239 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState
*env
,
241 int is_code
, int match_epn
)
243 #if !defined(FLUSH_ALL_TLBS)
244 CPUState
*cs
= env_cpu(env
);
248 /* Invalidate ITLB + DTLB, all ways */
249 for (way
= 0; way
< env
->nb_ways
; way
++) {
250 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
, is_code
);
251 tlb
= &env
->tlb
.tlb6
[nr
];
252 if (pte_is_valid(tlb
->pte0
) && (match_epn
== 0 || eaddr
== tlb
->EPN
)) {
253 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx
"\n", nr
,
255 pte_invalidate(&tlb
->pte0
);
256 tlb_flush_page(cs
, tlb
->EPN
);
260 /* XXX: PowerPC specification say this is valid as well */
261 ppc6xx_tlb_invalidate_all(env
);
265 static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState
*env
,
266 target_ulong eaddr
, int is_code
)
268 ppc6xx_tlb_invalidate_virt2(env
, eaddr
, is_code
, 0);
271 static void ppc6xx_tlb_store(CPUPPCState
*env
, target_ulong EPN
, int way
,
272 int is_code
, target_ulong pte0
, target_ulong pte1
)
277 nr
= ppc6xx_tlb_getnum(env
, EPN
, way
, is_code
);
278 tlb
= &env
->tlb
.tlb6
[nr
];
279 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
280 " PTE1 " TARGET_FMT_lx
"\n", nr
, env
->nb_tlb
, EPN
, pte0
, pte1
);
281 /* Invalidate any pending reference in QEMU for this virtual address */
282 ppc6xx_tlb_invalidate_virt2(env
, EPN
, is_code
, 1);
286 /* Store last way for LRU mechanism */
290 static int ppc6xx_tlb_check(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
291 target_ulong eaddr
, MMUAccessType access_type
)
298 ret
= -1; /* No TLB found */
299 for (way
= 0; way
< env
->nb_ways
; way
++) {
300 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
, access_type
== MMU_INST_FETCH
);
301 tlb
= &env
->tlb
.tlb6
[nr
];
302 /* This test "emulates" the PTE index match for hardware TLBs */
303 if ((eaddr
& TARGET_PAGE_MASK
) != tlb
->EPN
) {
304 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx
" " TARGET_FMT_lx
305 "] <> " TARGET_FMT_lx
"\n", nr
, env
->nb_tlb
,
306 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
307 tlb
->EPN
, tlb
->EPN
+ TARGET_PAGE_SIZE
, eaddr
);
310 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx
" <> " TARGET_FMT_lx
" "
311 TARGET_FMT_lx
" %c %c\n", nr
, env
->nb_tlb
,
312 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
313 tlb
->EPN
, eaddr
, tlb
->pte1
,
314 access_type
== MMU_DATA_STORE
? 'S' : 'L',
315 access_type
== MMU_INST_FETCH
? 'I' : 'D');
316 switch (ppc6xx_tlb_pte_check(ctx
, tlb
->pte0
, tlb
->pte1
,
319 /* TLB inconsistency */
322 /* Access violation */
333 * XXX: we should go on looping to check all TLBs
334 * consistency but we can speed-up the whole thing as
335 * the result would be undefined if TLBs are not
345 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx
" prot=%01x ret=%d\n",
346 ctx
->raddr
& TARGET_PAGE_MASK
, ctx
->prot
, ret
);
347 /* Update page flags */
348 pte_update_flags(ctx
, &env
->tlb
.tlb6
[best
].pte1
, ret
, access_type
);
354 /* Perform BAT hit & translation */
355 static inline void bat_size_prot(CPUPPCState
*env
, target_ulong
*blp
,
356 int *validp
, int *protp
, target_ulong
*BATu
,
362 bl
= (*BATu
& 0x00001FFC) << 15;
365 if (((msr_pr
== 0) && (*BATu
& 0x00000002)) ||
366 ((msr_pr
!= 0) && (*BATu
& 0x00000001))) {
368 pp
= *BATl
& 0x00000003;
370 prot
= PAGE_READ
| PAGE_EXEC
;
381 static int get_bat_6xx_tlb(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
382 target_ulong
virtual, MMUAccessType access_type
)
384 target_ulong
*BATlt
, *BATut
, *BATu
, *BATl
;
385 target_ulong BEPIl
, BEPIu
, bl
;
388 bool ifetch
= access_type
== MMU_INST_FETCH
;
390 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx
"\n", __func__
,
391 ifetch
? 'I' : 'D', virtual);
393 BATlt
= env
->IBAT
[1];
394 BATut
= env
->IBAT
[0];
396 BATlt
= env
->DBAT
[1];
397 BATut
= env
->DBAT
[0];
399 for (i
= 0; i
< env
->nb_BATs
; i
++) {
402 BEPIu
= *BATu
& 0xF0000000;
403 BEPIl
= *BATu
& 0x0FFE0000;
404 bat_size_prot(env
, &bl
, &valid
, &prot
, BATu
, BATl
);
405 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx
406 " BATl " TARGET_FMT_lx
"\n", __func__
,
407 ifetch
? 'I' : 'D', i
, virtual, *BATu
, *BATl
);
408 if ((virtual & 0xF0000000) == BEPIu
&&
409 ((virtual & 0x0FFE0000) & ~bl
) == BEPIl
) {
412 /* Get physical address */
413 ctx
->raddr
= (*BATl
& 0xF0000000) |
414 ((virtual & 0x0FFE0000 & bl
) | (*BATl
& 0x0FFE0000)) |
415 (virtual & 0x0001F000);
416 /* Compute access rights */
418 ret
= check_prot(ctx
->prot
, access_type
);
420 LOG_BATS("BAT %d match: r " TARGET_FMT_plx
" prot=%c%c\n",
421 i
, ctx
->raddr
, ctx
->prot
& PAGE_READ
? 'R' : '-',
422 ctx
->prot
& PAGE_WRITE
? 'W' : '-');
429 #if defined(DEBUG_BATS)
430 if (qemu_log_enabled()) {
431 LOG_BATS("no BAT match for " TARGET_FMT_lx
":\n", virtual);
432 for (i
= 0; i
< 4; i
++) {
435 BEPIu
= *BATu
& 0xF0000000;
436 BEPIl
= *BATu
& 0x0FFE0000;
437 bl
= (*BATu
& 0x00001FFC) << 15;
438 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx
439 " BATl " TARGET_FMT_lx
"\n\t" TARGET_FMT_lx
" "
440 TARGET_FMT_lx
" " TARGET_FMT_lx
"\n",
441 __func__
, ifetch
? 'I' : 'D', i
, virtual,
442 *BATu
, *BATl
, BEPIu
, BEPIl
, bl
);
451 /* Perform segment based translation */
452 static int get_segment_6xx_tlb(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
453 target_ulong eaddr
, MMUAccessType access_type
,
456 PowerPCCPU
*cpu
= env_archcpu(env
);
459 int ds
, pr
, target_page_bits
;
461 target_ulong sr
, pgidx
;
466 sr
= env
->sr
[eaddr
>> 28];
467 ctx
->key
= (((sr
& 0x20000000) && (pr
!= 0)) ||
468 ((sr
& 0x40000000) && (pr
== 0))) ? 1 : 0;
469 ds
= sr
& 0x80000000 ? 1 : 0;
470 ctx
->nx
= sr
& 0x10000000 ? 1 : 0;
471 vsid
= sr
& 0x00FFFFFF;
472 target_page_bits
= TARGET_PAGE_BITS
;
473 qemu_log_mask(CPU_LOG_MMU
,
474 "Check segment v=" TARGET_FMT_lx
" %d " TARGET_FMT_lx
475 " nip=" TARGET_FMT_lx
" lr=" TARGET_FMT_lx
476 " ir=%d dr=%d pr=%d %d t=%d\n",
477 eaddr
, (int)(eaddr
>> 28), sr
, env
->nip
, env
->lr
, (int)msr_ir
,
478 (int)msr_dr
, pr
!= 0 ? 1 : 0, access_type
== MMU_DATA_STORE
, type
);
479 pgidx
= (eaddr
& ~SEGMENT_MASK_256M
) >> target_page_bits
;
481 ctx
->ptem
= (vsid
<< 7) | (pgidx
>> 10);
483 qemu_log_mask(CPU_LOG_MMU
,
484 "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx
"\n",
485 ctx
->key
, ds
, ctx
->nx
, vsid
);
488 /* Check if instruction fetch is allowed, if needed */
489 if (type
!= ACCESS_CODE
|| ctx
->nx
== 0) {
490 /* Page address translation */
491 qemu_log_mask(CPU_LOG_MMU
, "htab_base " TARGET_FMT_plx
492 " htab_mask " TARGET_FMT_plx
493 " hash " TARGET_FMT_plx
"\n",
494 ppc_hash32_hpt_base(cpu
), ppc_hash32_hpt_mask(cpu
), hash
);
496 ctx
->hash
[1] = ~hash
;
498 /* Initialize real address with an invalid value */
499 ctx
->raddr
= (hwaddr
)-1ULL;
500 /* Software TLB search */
501 ret
= ppc6xx_tlb_check(env
, ctx
, eaddr
, access_type
);
502 #if defined(DUMP_PAGE_TABLES)
503 if (qemu_loglevel_mask(CPU_LOG_MMU
)) {
504 CPUState
*cs
= env_cpu(env
);
506 uint32_t a0
, a1
, a2
, a3
;
508 qemu_log("Page table: " TARGET_FMT_plx
" len " TARGET_FMT_plx
509 "\n", ppc_hash32_hpt_base(cpu
),
510 ppc_hash32_hpt_mask(env
) + 0x80);
511 for (curaddr
= ppc_hash32_hpt_base(cpu
);
512 curaddr
< (ppc_hash32_hpt_base(cpu
)
513 + ppc_hash32_hpt_mask(cpu
) + 0x80);
515 a0
= ldl_phys(cs
->as
, curaddr
);
516 a1
= ldl_phys(cs
->as
, curaddr
+ 4);
517 a2
= ldl_phys(cs
->as
, curaddr
+ 8);
518 a3
= ldl_phys(cs
->as
, curaddr
+ 12);
519 if (a0
!= 0 || a1
!= 0 || a2
!= 0 || a3
!= 0) {
520 qemu_log(TARGET_FMT_plx
": %08x %08x %08x %08x\n",
521 curaddr
, a0
, a1
, a2
, a3
);
527 qemu_log_mask(CPU_LOG_MMU
, "No access allowed\n");
533 qemu_log_mask(CPU_LOG_MMU
, "direct store...\n");
534 /* Direct-store segment : absolutely *BUGGY* for now */
537 * Direct-store implies a 32-bit MMU.
538 * Check the Segment Register's bus unit ID (BUID).
540 sr
= env
->sr
[eaddr
>> 28];
541 if ((sr
& 0x1FF00000) >> 20 == 0x07f) {
543 * Memory-forced I/O controller interface access
545 * If T=1 and BUID=x'07F', the 601 performs a memory
546 * access to SR[28-31] LA[4-31], bypassing all protection
549 ctx
->raddr
= ((sr
& 0xF) << 28) | (eaddr
& 0x0FFFFFFF);
550 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
556 /* Integer load/store : only access allowed */
559 /* No code fetch is allowed in direct-store areas */
562 /* Floating point load/store */
565 /* lwarx, ldarx or srwcx. */
569 * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
571 * Should make the instruction do no-op. As it already do
572 * no-op, it's quite easy :-)
580 qemu_log_mask(CPU_LOG_MMU
, "ERROR: instruction should not need "
581 "address translation\n");
584 if ((access_type
== MMU_DATA_STORE
|| ctx
->key
!= 1) &&
585 (access_type
== MMU_DATA_LOAD
|| ctx
->key
!= 0)) {
596 /* Generic TLB check function for embedded PowerPC implementations */
597 static int ppcemb_tlb_check(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
599 target_ulong address
, uint32_t pid
, int ext
,
604 /* Check valid flag */
605 if (!(tlb
->prot
& PAGE_VALID
)) {
608 mask
= ~(tlb
->size
- 1);
609 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx
" PID %u <=> " TARGET_FMT_lx
610 " " TARGET_FMT_lx
" %u %x\n", __func__
, i
, address
, pid
, tlb
->EPN
,
611 mask
, (uint32_t)tlb
->PID
, tlb
->prot
);
613 if (tlb
->PID
!= 0 && tlb
->PID
!= pid
) {
616 /* Check effective address */
617 if ((address
& mask
) != tlb
->EPN
) {
620 *raddrp
= (tlb
->RPN
& mask
) | (address
& ~mask
);
622 /* Extend the physical address to 36 bits */
623 *raddrp
|= (uint64_t)(tlb
->RPN
& 0xF) << 32;
629 /* Generic TLB search function for PowerPC embedded implementations */
630 static int ppcemb_tlb_search(CPUPPCState
*env
, target_ulong address
,
637 /* Default return value is no match */
639 for (i
= 0; i
< env
->nb_tlb
; i
++) {
640 tlb
= &env
->tlb
.tlbe
[i
];
641 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
, pid
, 0, i
) == 0) {
650 /* Helpers specific to PowerPC 40x implementations */
651 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState
*env
)
656 for (i
= 0; i
< env
->nb_tlb
; i
++) {
657 tlb
= &env
->tlb
.tlbe
[i
];
658 tlb
->prot
&= ~PAGE_VALID
;
660 tlb_flush(env_cpu(env
));
663 static int mmu40x_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
664 target_ulong address
,
665 MMUAccessType access_type
,
670 int i
, ret
, zsel
, zpr
, pr
;
673 raddr
= (hwaddr
)-1ULL;
675 for (i
= 0; i
< env
->nb_tlb
; i
++) {
676 tlb
= &env
->tlb
.tlbe
[i
];
677 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
,
678 env
->spr
[SPR_40x_PID
], 0, i
) < 0) {
681 zsel
= (tlb
->attr
>> 4) & 0xF;
682 zpr
= (env
->spr
[SPR_40x_ZPR
] >> (30 - (2 * zsel
))) & 0x3;
683 LOG_SWTLB("%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
684 __func__
, i
, zsel
, zpr
, access_type
, tlb
->attr
);
685 /* Check execute enable bit */
693 /* All accesses granted */
694 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
699 /* Raise Zone protection fault. */
700 env
->spr
[SPR_40x_ESR
] = 1 << 22;
708 /* Check from TLB entry */
709 ctx
->prot
= tlb
->prot
;
710 ret
= check_prot(ctx
->prot
, access_type
);
712 env
->spr
[SPR_40x_ESR
] = 0;
718 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
719 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
724 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
725 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
730 void store_40x_sler(CPUPPCState
*env
, uint32_t val
)
732 /* XXX: TO BE FIXED */
733 if (val
!= 0x00000000) {
734 cpu_abort(env_cpu(env
),
735 "Little-endian regions are not supported by now\n");
737 env
->spr
[SPR_405_SLER
] = val
;
740 static int mmubooke_check_tlb(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
741 hwaddr
*raddr
, int *prot
, target_ulong address
,
742 MMUAccessType access_type
, int type
, int i
)
746 if (ppcemb_tlb_check(env
, tlb
, raddr
, address
,
747 env
->spr
[SPR_BOOKE_PID
],
748 !env
->nb_pids
, i
) >= 0) {
752 if (env
->spr
[SPR_BOOKE_PID1
] &&
753 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
754 env
->spr
[SPR_BOOKE_PID1
], 0, i
) >= 0) {
758 if (env
->spr
[SPR_BOOKE_PID2
] &&
759 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
760 env
->spr
[SPR_BOOKE_PID2
], 0, i
) >= 0) {
764 LOG_SWTLB("%s: TLB entry not found\n", __func__
);
770 prot2
= tlb
->prot
& 0xF;
772 prot2
= (tlb
->prot
>> 4) & 0xF;
775 /* Check the address space */
776 if (type
== ACCESS_CODE
) {
777 if (msr_ir
!= (tlb
->attr
& 1)) {
778 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
783 if (prot2
& PAGE_EXEC
) {
784 LOG_SWTLB("%s: good TLB!\n", __func__
);
788 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__
, prot2
);
791 if (msr_dr
!= (tlb
->attr
& 1)) {
792 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
797 if (prot2
& (access_type
== MMU_DATA_LOAD
? PAGE_READ
: PAGE_WRITE
)) {
798 LOG_SWTLB("%s: found TLB!\n", __func__
);
802 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__
, prot2
);
809 static int mmubooke_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
810 target_ulong address
,
811 MMUAccessType access_type
,
819 raddr
= (hwaddr
)-1ULL;
820 for (i
= 0; i
< env
->nb_tlb
; i
++) {
821 tlb
= &env
->tlb
.tlbe
[i
];
822 ret
= mmubooke_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
,
823 access_type
, type
, i
);
831 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
832 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
835 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
836 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
842 static void booke206_flush_tlb(CPUPPCState
*env
, int flags
,
843 const int check_iprot
)
847 ppcmas_tlb_t
*tlb
= env
->tlb
.tlbm
;
849 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
850 if (flags
& (1 << i
)) {
851 tlb_size
= booke206_tlb_size(env
, i
);
852 for (j
= 0; j
< tlb_size
; j
++) {
853 if (!check_iprot
|| !(tlb
[j
].mas1
& MAS1_IPROT
)) {
854 tlb
[j
].mas1
&= ~MAS1_VALID
;
858 tlb
+= booke206_tlb_size(env
, i
);
861 tlb_flush(env_cpu(env
));
864 static hwaddr
booke206_tlb_to_page_size(CPUPPCState
*env
,
869 tlbm_size
= (tlb
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
871 return 1024ULL << tlbm_size
;
874 /* TLB check function for MAS based SoftTLBs */
875 static int ppcmas_tlb_check(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
876 hwaddr
*raddrp
, target_ulong address
,
883 /* In 32bit mode we can only address 32bit EAs */
884 address
= (uint32_t)address
;
887 /* Check valid flag */
888 if (!(tlb
->mas1
& MAS1_VALID
)) {
892 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
893 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx
" PID=0x%x MAS1=0x%x MAS2=0x%"
894 PRIx64
" mask=0x%" HWADDR_PRIx
" MAS7_3=0x%" PRIx64
" MAS8=0x%"
895 PRIx32
"\n", __func__
, address
, pid
, tlb
->mas1
, tlb
->mas2
, mask
,
896 tlb
->mas7_3
, tlb
->mas8
);
899 tlb_pid
= (tlb
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
;
900 if (tlb_pid
!= 0 && tlb_pid
!= pid
) {
904 /* Check effective address */
905 if ((address
& mask
) != (tlb
->mas2
& MAS2_EPN_MASK
)) {
910 *raddrp
= (tlb
->mas7_3
& mask
) | (address
& ~mask
);
916 static bool is_epid_mmu(int mmu_idx
)
918 return mmu_idx
== PPC_TLB_EPID_STORE
|| mmu_idx
== PPC_TLB_EPID_LOAD
;
921 static uint32_t mmubooke206_esr(int mmu_idx
, MMUAccessType access_type
)
924 if (access_type
== MMU_DATA_STORE
) {
927 if (is_epid_mmu(mmu_idx
)) {
934 * Get EPID register given the mmu_idx. If this is regular load,
935 * construct the EPID access bits from current processor state
937 * Get the effective AS and PR bits and the PID. The PID is returned
938 * only if EPID load is requested, otherwise the caller must detect
939 * the correct EPID. Return true if valid EPID is returned.
941 static bool mmubooke206_get_as(CPUPPCState
*env
,
942 int mmu_idx
, uint32_t *epid_out
,
943 bool *as_out
, bool *pr_out
)
945 if (is_epid_mmu(mmu_idx
)) {
947 if (mmu_idx
== PPC_TLB_EPID_STORE
) {
948 epidr
= env
->spr
[SPR_BOOKE_EPSC
];
950 epidr
= env
->spr
[SPR_BOOKE_EPLC
];
952 *epid_out
= (epidr
& EPID_EPID
) >> EPID_EPID_SHIFT
;
953 *as_out
= !!(epidr
& EPID_EAS
);
954 *pr_out
= !!(epidr
& EPID_EPR
);
963 /* Check if the tlb found by hashing really matches */
964 static int mmubooke206_check_tlb(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
965 hwaddr
*raddr
, int *prot
,
966 target_ulong address
,
967 MMUAccessType access_type
,
968 int type
, int mmu_idx
)
974 bool use_epid
= mmubooke206_get_as(env
, mmu_idx
, &epid
, &as
, &pr
);
977 if (ppcmas_tlb_check(env
, tlb
, raddr
, address
,
978 env
->spr
[SPR_BOOKE_PID
]) >= 0) {
982 if (env
->spr
[SPR_BOOKE_PID1
] &&
983 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
984 env
->spr
[SPR_BOOKE_PID1
]) >= 0) {
988 if (env
->spr
[SPR_BOOKE_PID2
] &&
989 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
990 env
->spr
[SPR_BOOKE_PID2
]) >= 0) {
994 if (ppcmas_tlb_check(env
, tlb
, raddr
, address
, epid
) >= 0) {
999 LOG_SWTLB("%s: TLB entry not found\n", __func__
);
1005 if (tlb
->mas7_3
& MAS3_UR
) {
1008 if (tlb
->mas7_3
& MAS3_UW
) {
1009 prot2
|= PAGE_WRITE
;
1011 if (tlb
->mas7_3
& MAS3_UX
) {
1015 if (tlb
->mas7_3
& MAS3_SR
) {
1018 if (tlb
->mas7_3
& MAS3_SW
) {
1019 prot2
|= PAGE_WRITE
;
1021 if (tlb
->mas7_3
& MAS3_SX
) {
1026 /* Check the address space and permissions */
1027 if (type
== ACCESS_CODE
) {
1028 /* There is no way to fetch code using epid load */
1030 if (msr_ir
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
1031 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1036 if (prot2
& PAGE_EXEC
) {
1037 LOG_SWTLB("%s: good TLB!\n", __func__
);
1041 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__
, prot2
);
1044 if (as
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
1045 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1050 if (prot2
& (access_type
== MMU_DATA_LOAD
? PAGE_READ
: PAGE_WRITE
)) {
1051 LOG_SWTLB("%s: found TLB!\n", __func__
);
1055 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__
, prot2
);
1062 static int mmubooke206_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1063 target_ulong address
,
1064 MMUAccessType access_type
,
1065 int type
, int mmu_idx
)
1072 raddr
= (hwaddr
)-1ULL;
1074 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1075 int ways
= booke206_tlb_ways(env
, i
);
1077 for (j
= 0; j
< ways
; j
++) {
1078 tlb
= booke206_get_tlbm(env
, i
, address
, j
);
1082 ret
= mmubooke206_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
,
1083 access_type
, type
, mmu_idx
);
1094 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1095 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1098 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1099 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1105 static const char *book3e_tsize_to_str
[32] = {
1106 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
1107 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
1108 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
1112 static void mmubooke_dump_mmu(CPUPPCState
*env
)
1114 ppcemb_tlb_t
*entry
;
1117 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
1118 qemu_printf("Cannot access KVM TLB\n");
1122 qemu_printf("\nTLB:\n");
1123 qemu_printf("Effective Physical Size PID Prot "
1126 entry
= &env
->tlb
.tlbe
[0];
1127 for (i
= 0; i
< env
->nb_tlb
; i
++, entry
++) {
1130 uint64_t size
= (uint64_t)entry
->size
;
1133 /* Check valid flag */
1134 if (!(entry
->prot
& PAGE_VALID
)) {
1138 mask
= ~(entry
->size
- 1);
1139 ea
= entry
->EPN
& mask
;
1140 pa
= entry
->RPN
& mask
;
1141 /* Extend the physical address to 36 bits */
1142 pa
|= (hwaddr
)(entry
->RPN
& 0xF) << 32;
1143 if (size
>= 1 * MiB
) {
1144 snprintf(size_buf
, sizeof(size_buf
), "%3" PRId64
"M", size
/ MiB
);
1146 snprintf(size_buf
, sizeof(size_buf
), "%3" PRId64
"k", size
/ KiB
);
1148 qemu_printf("0x%016" PRIx64
" 0x%016" PRIx64
" %s %-5u %08x %08x\n",
1149 (uint64_t)ea
, (uint64_t)pa
, size_buf
, (uint32_t)entry
->PID
,
1150 entry
->prot
, entry
->attr
);
1155 static void mmubooke206_dump_one_tlb(CPUPPCState
*env
, int tlbn
, int offset
,
1158 ppcmas_tlb_t
*entry
;
1161 qemu_printf("\nTLB%d:\n", tlbn
);
1162 qemu_printf("Effective Physical Size TID TS SRWX"
1163 " URWX WIMGE U0123\n");
1165 entry
= &env
->tlb
.tlbm
[offset
];
1166 for (i
= 0; i
< tlbsize
; i
++, entry
++) {
1167 hwaddr ea
, pa
, size
;
1170 if (!(entry
->mas1
& MAS1_VALID
)) {
1174 tsize
= (entry
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
1175 size
= 1024ULL << tsize
;
1176 ea
= entry
->mas2
& ~(size
- 1);
1177 pa
= entry
->mas7_3
& ~(size
- 1);
1179 qemu_printf("0x%016" PRIx64
" 0x%016" PRIx64
" %4s %-5u %1u S%c%c%c"
1180 "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
1181 (uint64_t)ea
, (uint64_t)pa
,
1182 book3e_tsize_to_str
[tsize
],
1183 (entry
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
,
1184 (entry
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
,
1185 entry
->mas7_3
& MAS3_SR
? 'R' : '-',
1186 entry
->mas7_3
& MAS3_SW
? 'W' : '-',
1187 entry
->mas7_3
& MAS3_SX
? 'X' : '-',
1188 entry
->mas7_3
& MAS3_UR
? 'R' : '-',
1189 entry
->mas7_3
& MAS3_UW
? 'W' : '-',
1190 entry
->mas7_3
& MAS3_UX
? 'X' : '-',
1191 entry
->mas2
& MAS2_W
? 'W' : '-',
1192 entry
->mas2
& MAS2_I
? 'I' : '-',
1193 entry
->mas2
& MAS2_M
? 'M' : '-',
1194 entry
->mas2
& MAS2_G
? 'G' : '-',
1195 entry
->mas2
& MAS2_E
? 'E' : '-',
1196 entry
->mas7_3
& MAS3_U0
? '0' : '-',
1197 entry
->mas7_3
& MAS3_U1
? '1' : '-',
1198 entry
->mas7_3
& MAS3_U2
? '2' : '-',
1199 entry
->mas7_3
& MAS3_U3
? '3' : '-');
1203 static void mmubooke206_dump_mmu(CPUPPCState
*env
)
1208 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
1209 qemu_printf("Cannot access KVM TLB\n");
1213 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1214 int size
= booke206_tlb_size(env
, i
);
1220 mmubooke206_dump_one_tlb(env
, i
, offset
, size
);
1225 static void mmu6xx_dump_BATs(CPUPPCState
*env
, int type
)
1227 target_ulong
*BATlt
, *BATut
, *BATu
, *BATl
;
1228 target_ulong BEPIl
, BEPIu
, bl
;
1233 BATlt
= env
->IBAT
[1];
1234 BATut
= env
->IBAT
[0];
1237 BATlt
= env
->DBAT
[1];
1238 BATut
= env
->DBAT
[0];
1242 for (i
= 0; i
< env
->nb_BATs
; i
++) {
1245 BEPIu
= *BATu
& 0xF0000000;
1246 BEPIl
= *BATu
& 0x0FFE0000;
1247 bl
= (*BATu
& 0x00001FFC) << 15;
1248 qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
1249 " BATl " TARGET_FMT_lx
"\n\t" TARGET_FMT_lx
" "
1250 TARGET_FMT_lx
" " TARGET_FMT_lx
"\n",
1251 type
== ACCESS_CODE
? "code" : "data", i
,
1252 *BATu
, *BATl
, BEPIu
, BEPIl
, bl
);
1256 static void mmu6xx_dump_mmu(CPUPPCState
*env
)
1258 PowerPCCPU
*cpu
= env_archcpu(env
);
1261 int type
, way
, entry
, i
;
1263 qemu_printf("HTAB base = 0x%"HWADDR_PRIx
"\n", ppc_hash32_hpt_base(cpu
));
1264 qemu_printf("HTAB mask = 0x%"HWADDR_PRIx
"\n", ppc_hash32_hpt_mask(cpu
));
1266 qemu_printf("\nSegment registers:\n");
1267 for (i
= 0; i
< 32; i
++) {
1269 if (sr
& 0x80000000) {
1270 qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
1271 "CNTLR_SPEC=0x%05x\n", i
,
1272 sr
& 0x80000000 ? 1 : 0, sr
& 0x40000000 ? 1 : 0,
1273 sr
& 0x20000000 ? 1 : 0, (uint32_t)((sr
>> 20) & 0x1FF),
1274 (uint32_t)(sr
& 0xFFFFF));
1276 qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i
,
1277 sr
& 0x80000000 ? 1 : 0, sr
& 0x40000000 ? 1 : 0,
1278 sr
& 0x20000000 ? 1 : 0, sr
& 0x10000000 ? 1 : 0,
1279 (uint32_t)(sr
& 0x00FFFFFF));
1283 qemu_printf("\nBATs:\n");
1284 mmu6xx_dump_BATs(env
, ACCESS_INT
);
1285 mmu6xx_dump_BATs(env
, ACCESS_CODE
);
1287 if (env
->id_tlbs
!= 1) {
1288 qemu_printf("ERROR: 6xx MMU should have separated TLB"
1289 " for code and data\n");
1292 qemu_printf("\nTLBs [EPN EPN + SIZE]\n");
1294 for (type
= 0; type
< 2; type
++) {
1295 for (way
= 0; way
< env
->nb_ways
; way
++) {
1296 for (entry
= env
->nb_tlb
* type
+ env
->tlb_per_way
* way
;
1297 entry
< (env
->nb_tlb
* type
+ env
->tlb_per_way
* (way
+ 1));
1300 tlb
= &env
->tlb
.tlb6
[entry
];
1301 qemu_printf("%s TLB %02d/%02d way:%d %s ["
1302 TARGET_FMT_lx
" " TARGET_FMT_lx
"]\n",
1303 type
? "code" : "data", entry
% env
->nb_tlb
,
1305 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
1306 tlb
->EPN
, tlb
->EPN
+ TARGET_PAGE_SIZE
);
1312 void dump_mmu(CPUPPCState
*env
)
1314 switch (env
->mmu_model
) {
1315 case POWERPC_MMU_BOOKE
:
1316 mmubooke_dump_mmu(env
);
1318 case POWERPC_MMU_BOOKE206
:
1319 mmubooke206_dump_mmu(env
);
1321 case POWERPC_MMU_SOFT_6xx
:
1322 case POWERPC_MMU_SOFT_74xx
:
1323 mmu6xx_dump_mmu(env
);
1325 #if defined(TARGET_PPC64)
1326 case POWERPC_MMU_64B
:
1327 case POWERPC_MMU_2_03
:
1328 case POWERPC_MMU_2_06
:
1329 case POWERPC_MMU_2_07
:
1330 dump_slb(env_archcpu(env
));
1332 case POWERPC_MMU_3_00
:
1333 if (ppc64_v3_radix(env_archcpu(env
))) {
1334 qemu_log_mask(LOG_UNIMP
, "%s: the PPC64 MMU is unsupported\n",
1337 dump_slb(env_archcpu(env
));
1342 qemu_log_mask(LOG_UNIMP
, "%s: unimplemented\n", __func__
);
1346 static int check_physical(CPUPPCState
*env
, mmu_ctx_t
*ctx
, target_ulong eaddr
,
1347 MMUAccessType access_type
)
1352 ctx
->prot
= PAGE_READ
| PAGE_EXEC
;
1354 switch (env
->mmu_model
) {
1355 case POWERPC_MMU_SOFT_6xx
:
1356 case POWERPC_MMU_SOFT_74xx
:
1357 case POWERPC_MMU_SOFT_4xx
:
1358 case POWERPC_MMU_REAL
:
1359 case POWERPC_MMU_BOOKE
:
1360 ctx
->prot
|= PAGE_WRITE
;
1363 case POWERPC_MMU_SOFT_4xx_Z
:
1364 if (unlikely(msr_pe
!= 0)) {
1366 * 403 family add some particular protections, using
1367 * PBL/PBU registers for accesses with no translation.
1370 /* Check PLB validity */
1371 (env
->pb
[0] < env
->pb
[1] &&
1372 /* and address in plb area */
1373 eaddr
>= env
->pb
[0] && eaddr
< env
->pb
[1]) ||
1374 (env
->pb
[2] < env
->pb
[3] &&
1375 eaddr
>= env
->pb
[2] && eaddr
< env
->pb
[3]) ? 1 : 0;
1376 if (in_plb
^ msr_px
) {
1377 /* Access in protected area */
1378 if (access_type
== MMU_DATA_STORE
) {
1379 /* Access is not allowed */
1383 /* Read-write access is allowed */
1384 ctx
->prot
|= PAGE_WRITE
;
1390 /* Caller's checks mean we should never get here for other models */
1398 static int get_physical_address_wtlb(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1400 MMUAccessType access_type
, int type
,
1404 bool real_mode
= (type
== ACCESS_CODE
&& msr_ir
== 0)
1405 || (type
!= ACCESS_CODE
&& msr_dr
== 0);
1407 switch (env
->mmu_model
) {
1408 case POWERPC_MMU_SOFT_6xx
:
1409 case POWERPC_MMU_SOFT_74xx
:
1411 ret
= check_physical(env
, ctx
, eaddr
, access_type
);
1413 /* Try to find a BAT */
1414 if (env
->nb_BATs
!= 0) {
1415 ret
= get_bat_6xx_tlb(env
, ctx
, eaddr
, access_type
);
1418 /* We didn't match any BAT entry or don't have BATs */
1419 ret
= get_segment_6xx_tlb(env
, ctx
, eaddr
, access_type
, type
);
1424 case POWERPC_MMU_SOFT_4xx
:
1425 case POWERPC_MMU_SOFT_4xx_Z
:
1427 ret
= check_physical(env
, ctx
, eaddr
, access_type
);
1429 ret
= mmu40x_get_physical_address(env
, ctx
, eaddr
,
1433 case POWERPC_MMU_BOOKE
:
1434 ret
= mmubooke_get_physical_address(env
, ctx
, eaddr
,
1437 case POWERPC_MMU_BOOKE206
:
1438 ret
= mmubooke206_get_physical_address(env
, ctx
, eaddr
, access_type
,
1441 case POWERPC_MMU_MPC8xx
:
1443 cpu_abort(env_cpu(env
), "MPC8xx MMU model is not implemented\n");
1445 case POWERPC_MMU_REAL
:
1447 ret
= check_physical(env
, ctx
, eaddr
, access_type
);
1449 cpu_abort(env_cpu(env
),
1450 "PowerPC in real mode do not do any translation\n");
1454 cpu_abort(env_cpu(env
), "Unknown or invalid MMU model\n");
1461 static int get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1462 target_ulong eaddr
, MMUAccessType access_type
,
1465 return get_physical_address_wtlb(env
, ctx
, eaddr
, access_type
, type
, 0);
1468 hwaddr
ppc_cpu_get_phys_page_debug(CPUState
*cs
, vaddr addr
)
1470 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
1471 CPUPPCState
*env
= &cpu
->env
;
1474 switch (env
->mmu_model
) {
1475 #if defined(TARGET_PPC64)
1476 case POWERPC_MMU_64B
:
1477 case POWERPC_MMU_2_03
:
1478 case POWERPC_MMU_2_06
:
1479 case POWERPC_MMU_2_07
:
1480 return ppc_hash64_get_phys_page_debug(cpu
, addr
);
1481 case POWERPC_MMU_3_00
:
1482 return ppc64_v3_get_phys_page_debug(cpu
, addr
);
1485 case POWERPC_MMU_32B
:
1486 case POWERPC_MMU_601
:
1487 return ppc_hash32_get_phys_page_debug(cpu
, addr
);
1493 if (unlikely(get_physical_address(env
, &ctx
, addr
, MMU_DATA_LOAD
,
1494 ACCESS_INT
) != 0)) {
1497 * Some MMUs have separate TLBs for code and data. If we only
1498 * try an ACCESS_INT, we may not be able to read instructions
1499 * mapped by code TLBs, so we also try a ACCESS_CODE.
1501 if (unlikely(get_physical_address(env
, &ctx
, addr
, MMU_INST_FETCH
,
1502 ACCESS_CODE
) != 0)) {
1507 return ctx
.raddr
& TARGET_PAGE_MASK
;
1510 static void booke206_update_mas_tlb_miss(CPUPPCState
*env
, target_ulong address
,
1511 MMUAccessType access_type
, int mmu_idx
)
1515 uint32_t missed_tid
= 0;
1516 bool use_epid
= mmubooke206_get_as(env
, mmu_idx
, &epid
, &as
, &pr
);
1518 if (access_type
== MMU_INST_FETCH
) {
1521 env
->spr
[SPR_BOOKE_MAS0
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TLBSELD_MASK
;
1522 env
->spr
[SPR_BOOKE_MAS1
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TSIZED_MASK
;
1523 env
->spr
[SPR_BOOKE_MAS2
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_WIMGED_MASK
;
1524 env
->spr
[SPR_BOOKE_MAS3
] = 0;
1525 env
->spr
[SPR_BOOKE_MAS6
] = 0;
1526 env
->spr
[SPR_BOOKE_MAS7
] = 0;
1530 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_TS
;
1531 env
->spr
[SPR_BOOKE_MAS6
] |= MAS6_SAS
;
1534 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_VALID
;
1535 env
->spr
[SPR_BOOKE_MAS2
] |= address
& MAS2_EPN_MASK
;
1538 switch (env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TIDSELD_PIDZ
) {
1539 case MAS4_TIDSELD_PID0
:
1540 missed_tid
= env
->spr
[SPR_BOOKE_PID
];
1542 case MAS4_TIDSELD_PID1
:
1543 missed_tid
= env
->spr
[SPR_BOOKE_PID1
];
1545 case MAS4_TIDSELD_PID2
:
1546 missed_tid
= env
->spr
[SPR_BOOKE_PID2
];
1549 env
->spr
[SPR_BOOKE_MAS6
] |= env
->spr
[SPR_BOOKE_PID
] << 16;
1552 env
->spr
[SPR_BOOKE_MAS6
] |= missed_tid
<< 16;
1554 env
->spr
[SPR_BOOKE_MAS1
] |= (missed_tid
<< MAS1_TID_SHIFT
);
1557 /* next victim logic */
1558 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_ESEL_SHIFT
;
1560 env
->last_way
&= booke206_tlb_ways(env
, 0) - 1;
1561 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
1564 /* Perform address translation */
1565 static int cpu_ppc_handle_mmu_fault(CPUPPCState
*env
, target_ulong address
,
1566 MMUAccessType access_type
, int mmu_idx
)
1568 CPUState
*cs
= env_cpu(env
);
1569 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
1574 if (access_type
== MMU_INST_FETCH
) {
1579 type
= env
->access_type
;
1581 ret
= get_physical_address_wtlb(env
, &ctx
, address
, access_type
,
1584 tlb_set_page(cs
, address
& TARGET_PAGE_MASK
,
1585 ctx
.raddr
& TARGET_PAGE_MASK
, ctx
.prot
,
1586 mmu_idx
, TARGET_PAGE_SIZE
);
1588 } else if (ret
< 0) {
1590 if (type
== ACCESS_CODE
) {
1593 /* No matches in page tables or TLB */
1594 switch (env
->mmu_model
) {
1595 case POWERPC_MMU_SOFT_6xx
:
1596 cs
->exception_index
= POWERPC_EXCP_IFTLB
;
1597 env
->error_code
= 1 << 18;
1598 env
->spr
[SPR_IMISS
] = address
;
1599 env
->spr
[SPR_ICMP
] = 0x80000000 | ctx
.ptem
;
1601 case POWERPC_MMU_SOFT_74xx
:
1602 cs
->exception_index
= POWERPC_EXCP_IFTLB
;
1604 case POWERPC_MMU_SOFT_4xx
:
1605 case POWERPC_MMU_SOFT_4xx_Z
:
1606 cs
->exception_index
= POWERPC_EXCP_ITLB
;
1607 env
->error_code
= 0;
1608 env
->spr
[SPR_40x_DEAR
] = address
;
1609 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1611 case POWERPC_MMU_BOOKE206
:
1612 booke206_update_mas_tlb_miss(env
, address
, 2, mmu_idx
);
1614 case POWERPC_MMU_BOOKE
:
1615 cs
->exception_index
= POWERPC_EXCP_ITLB
;
1616 env
->error_code
= 0;
1617 env
->spr
[SPR_BOOKE_DEAR
] = address
;
1618 env
->spr
[SPR_BOOKE_ESR
] = mmubooke206_esr(mmu_idx
, MMU_DATA_LOAD
);
1620 case POWERPC_MMU_MPC8xx
:
1622 cpu_abort(cs
, "MPC8xx MMU model is not implemented\n");
1624 case POWERPC_MMU_REAL
:
1625 cpu_abort(cs
, "PowerPC in real mode should never raise "
1626 "any MMU exceptions\n");
1629 cpu_abort(cs
, "Unknown or invalid MMU model\n");
1634 /* Access rights violation */
1635 cs
->exception_index
= POWERPC_EXCP_ISI
;
1636 env
->error_code
= 0x08000000;
1639 /* No execute protection violation */
1640 if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1641 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1642 env
->spr
[SPR_BOOKE_ESR
] = 0x00000000;
1644 cs
->exception_index
= POWERPC_EXCP_ISI
;
1645 env
->error_code
= 0x10000000;
1648 /* Direct store exception */
1649 /* No code fetch is allowed in direct-store areas */
1650 cs
->exception_index
= POWERPC_EXCP_ISI
;
1651 env
->error_code
= 0x10000000;
1657 /* No matches in page tables or TLB */
1658 switch (env
->mmu_model
) {
1659 case POWERPC_MMU_SOFT_6xx
:
1660 if (access_type
== MMU_DATA_STORE
) {
1661 cs
->exception_index
= POWERPC_EXCP_DSTLB
;
1662 env
->error_code
= 1 << 16;
1664 cs
->exception_index
= POWERPC_EXCP_DLTLB
;
1665 env
->error_code
= 0;
1667 env
->spr
[SPR_DMISS
] = address
;
1668 env
->spr
[SPR_DCMP
] = 0x80000000 | ctx
.ptem
;
1670 env
->error_code
|= ctx
.key
<< 19;
1671 env
->spr
[SPR_HASH1
] = ppc_hash32_hpt_base(cpu
) +
1672 get_pteg_offset32(cpu
, ctx
.hash
[0]);
1673 env
->spr
[SPR_HASH2
] = ppc_hash32_hpt_base(cpu
) +
1674 get_pteg_offset32(cpu
, ctx
.hash
[1]);
1676 case POWERPC_MMU_SOFT_74xx
:
1677 if (access_type
== MMU_DATA_STORE
) {
1678 cs
->exception_index
= POWERPC_EXCP_DSTLB
;
1680 cs
->exception_index
= POWERPC_EXCP_DLTLB
;
1683 /* Implement LRU algorithm */
1684 env
->error_code
= ctx
.key
<< 19;
1685 env
->spr
[SPR_TLBMISS
] = (address
& ~((target_ulong
)0x3)) |
1686 ((env
->last_way
+ 1) & (env
->nb_ways
- 1));
1687 env
->spr
[SPR_PTEHI
] = 0x80000000 | ctx
.ptem
;
1689 case POWERPC_MMU_SOFT_4xx
:
1690 case POWERPC_MMU_SOFT_4xx_Z
:
1691 cs
->exception_index
= POWERPC_EXCP_DTLB
;
1692 env
->error_code
= 0;
1693 env
->spr
[SPR_40x_DEAR
] = address
;
1694 if (access_type
== MMU_DATA_STORE
) {
1695 env
->spr
[SPR_40x_ESR
] = 0x00800000;
1697 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1700 case POWERPC_MMU_MPC8xx
:
1702 cpu_abort(cs
, "MPC8xx MMU model is not implemented\n");
1704 case POWERPC_MMU_BOOKE206
:
1705 booke206_update_mas_tlb_miss(env
, address
, access_type
, mmu_idx
);
1707 case POWERPC_MMU_BOOKE
:
1708 cs
->exception_index
= POWERPC_EXCP_DTLB
;
1709 env
->error_code
= 0;
1710 env
->spr
[SPR_BOOKE_DEAR
] = address
;
1711 env
->spr
[SPR_BOOKE_ESR
] = mmubooke206_esr(mmu_idx
, access_type
);
1713 case POWERPC_MMU_REAL
:
1714 cpu_abort(cs
, "PowerPC in real mode should never raise "
1715 "any MMU exceptions\n");
1718 cpu_abort(cs
, "Unknown or invalid MMU model\n");
1723 /* Access rights violation */
1724 cs
->exception_index
= POWERPC_EXCP_DSI
;
1725 env
->error_code
= 0;
1726 if (env
->mmu_model
== POWERPC_MMU_SOFT_4xx
1727 || env
->mmu_model
== POWERPC_MMU_SOFT_4xx_Z
) {
1728 env
->spr
[SPR_40x_DEAR
] = address
;
1729 if (access_type
== MMU_DATA_STORE
) {
1730 env
->spr
[SPR_40x_ESR
] |= 0x00800000;
1732 } else if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1733 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1734 env
->spr
[SPR_BOOKE_DEAR
] = address
;
1735 env
->spr
[SPR_BOOKE_ESR
] = mmubooke206_esr(mmu_idx
, access_type
);
1737 env
->spr
[SPR_DAR
] = address
;
1738 if (access_type
== MMU_DATA_STORE
) {
1739 env
->spr
[SPR_DSISR
] = 0x0A000000;
1741 env
->spr
[SPR_DSISR
] = 0x08000000;
1746 /* Direct store exception */
1749 /* Floating point load/store */
1750 cs
->exception_index
= POWERPC_EXCP_ALIGN
;
1751 env
->error_code
= POWERPC_EXCP_ALIGN_FP
;
1752 env
->spr
[SPR_DAR
] = address
;
1755 /* lwarx, ldarx or stwcx. */
1756 cs
->exception_index
= POWERPC_EXCP_DSI
;
1757 env
->error_code
= 0;
1758 env
->spr
[SPR_DAR
] = address
;
1759 if (access_type
== MMU_DATA_STORE
) {
1760 env
->spr
[SPR_DSISR
] = 0x06000000;
1762 env
->spr
[SPR_DSISR
] = 0x04000000;
1766 /* eciwx or ecowx */
1767 cs
->exception_index
= POWERPC_EXCP_DSI
;
1768 env
->error_code
= 0;
1769 env
->spr
[SPR_DAR
] = address
;
1770 if (access_type
== MMU_DATA_STORE
) {
1771 env
->spr
[SPR_DSISR
] = 0x06100000;
1773 env
->spr
[SPR_DSISR
] = 0x04100000;
1777 printf("DSI: invalid exception (%d)\n", ret
);
1778 cs
->exception_index
= POWERPC_EXCP_PROGRAM
;
1780 POWERPC_EXCP_INVAL
| POWERPC_EXCP_INVAL_INVAL
;
1781 env
->spr
[SPR_DAR
] = address
;
1793 /*****************************************************************************/
1794 /* BATs management */
1795 #if !defined(FLUSH_ALL_TLBS)
1796 static inline void do_invalidate_BAT(CPUPPCState
*env
, target_ulong BATu
,
1799 CPUState
*cs
= env_cpu(env
);
1800 target_ulong base
, end
, page
;
1802 base
= BATu
& ~0x0001FFFF;
1803 end
= base
+ mask
+ 0x00020000;
1804 if (((end
- base
) >> TARGET_PAGE_BITS
) > 1024) {
1805 /* Flushing 1024 4K pages is slower than a complete flush */
1806 LOG_BATS("Flush all BATs\n");
1808 LOG_BATS("Flush done\n");
1811 LOG_BATS("Flush BAT from " TARGET_FMT_lx
" to " TARGET_FMT_lx
" ("
1812 TARGET_FMT_lx
")\n", base
, end
, mask
);
1813 for (page
= base
; page
!= end
; page
+= TARGET_PAGE_SIZE
) {
1814 tlb_flush_page(cs
, page
);
1816 LOG_BATS("Flush done\n");
1820 static inline void dump_store_bat(CPUPPCState
*env
, char ID
, int ul
, int nr
,
1823 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx
" (" TARGET_FMT_lx
")\n", ID
,
1824 nr
, ul
== 0 ? 'u' : 'l', value
, env
->nip
);
1827 void helper_store_ibatu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1830 #if defined(FLUSH_ALL_TLBS)
1831 PowerPCCPU
*cpu
= env_archcpu(env
);
1834 dump_store_bat(env
, 'I', 0, nr
, value
);
1835 if (env
->IBAT
[0][nr
] != value
) {
1836 mask
= (value
<< 15) & 0x0FFE0000UL
;
1837 #if !defined(FLUSH_ALL_TLBS)
1838 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1841 * When storing valid upper BAT, mask BEPI and BRPN and
1842 * invalidate all TLBs covered by this BAT
1844 mask
= (value
<< 15) & 0x0FFE0000UL
;
1845 env
->IBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
1846 (value
& ~0x0001FFFFUL
& ~mask
);
1847 env
->IBAT
[1][nr
] = (env
->IBAT
[1][nr
] & 0x0000007B) |
1848 (env
->IBAT
[1][nr
] & ~0x0001FFFF & ~mask
);
1849 #if !defined(FLUSH_ALL_TLBS)
1850 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1852 tlb_flush(env_cpu(env
));
1857 void helper_store_ibatl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1859 dump_store_bat(env
, 'I', 1, nr
, value
);
1860 env
->IBAT
[1][nr
] = value
;
1863 void helper_store_dbatu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1866 #if defined(FLUSH_ALL_TLBS)
1867 PowerPCCPU
*cpu
= env_archcpu(env
);
1870 dump_store_bat(env
, 'D', 0, nr
, value
);
1871 if (env
->DBAT
[0][nr
] != value
) {
1873 * When storing valid upper BAT, mask BEPI and BRPN and
1874 * invalidate all TLBs covered by this BAT
1876 mask
= (value
<< 15) & 0x0FFE0000UL
;
1877 #if !defined(FLUSH_ALL_TLBS)
1878 do_invalidate_BAT(env
, env
->DBAT
[0][nr
], mask
);
1880 mask
= (value
<< 15) & 0x0FFE0000UL
;
1881 env
->DBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
1882 (value
& ~0x0001FFFFUL
& ~mask
);
1883 env
->DBAT
[1][nr
] = (env
->DBAT
[1][nr
] & 0x0000007B) |
1884 (env
->DBAT
[1][nr
] & ~0x0001FFFF & ~mask
);
1885 #if !defined(FLUSH_ALL_TLBS)
1886 do_invalidate_BAT(env
, env
->DBAT
[0][nr
], mask
);
1888 tlb_flush(env_cpu(env
));
1893 void helper_store_dbatl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1895 dump_store_bat(env
, 'D', 1, nr
, value
);
1896 env
->DBAT
[1][nr
] = value
;
1899 void helper_store_601_batu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1902 #if defined(FLUSH_ALL_TLBS)
1903 PowerPCCPU
*cpu
= env_archcpu(env
);
1907 dump_store_bat(env
, 'I', 0, nr
, value
);
1908 if (env
->IBAT
[0][nr
] != value
) {
1909 #if defined(FLUSH_ALL_TLBS)
1912 mask
= (env
->IBAT
[1][nr
] << 17) & 0x0FFE0000UL
;
1913 if (env
->IBAT
[1][nr
] & 0x40) {
1914 /* Invalidate BAT only if it is valid */
1915 #if !defined(FLUSH_ALL_TLBS)
1916 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1922 * When storing valid upper BAT, mask BEPI and BRPN and
1923 * invalidate all TLBs covered by this BAT
1925 env
->IBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
1926 (value
& ~0x0001FFFFUL
& ~mask
);
1927 env
->DBAT
[0][nr
] = env
->IBAT
[0][nr
];
1928 if (env
->IBAT
[1][nr
] & 0x40) {
1929 #if !defined(FLUSH_ALL_TLBS)
1930 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1935 #if defined(FLUSH_ALL_TLBS)
1937 tlb_flush(env_cpu(env
));
1943 void helper_store_601_batl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1945 #if !defined(FLUSH_ALL_TLBS)
1948 PowerPCCPU
*cpu
= env_archcpu(env
);
1952 dump_store_bat(env
, 'I', 1, nr
, value
);
1953 if (env
->IBAT
[1][nr
] != value
) {
1954 #if defined(FLUSH_ALL_TLBS)
1957 if (env
->IBAT
[1][nr
] & 0x40) {
1958 #if !defined(FLUSH_ALL_TLBS)
1959 mask
= (env
->IBAT
[1][nr
] << 17) & 0x0FFE0000UL
;
1960 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1966 #if !defined(FLUSH_ALL_TLBS)
1967 mask
= (value
<< 17) & 0x0FFE0000UL
;
1968 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1973 env
->IBAT
[1][nr
] = value
;
1974 env
->DBAT
[1][nr
] = value
;
1975 #if defined(FLUSH_ALL_TLBS)
1977 tlb_flush(env_cpu(env
));
1983 /*****************************************************************************/
1984 /* TLB management */
1985 void ppc_tlb_invalidate_all(CPUPPCState
*env
)
1987 #if defined(TARGET_PPC64)
1988 if (mmu_is_64bit(env
->mmu_model
)) {
1989 env
->tlb_need_flush
= 0;
1990 tlb_flush(env_cpu(env
));
1992 #endif /* defined(TARGET_PPC64) */
1993 switch (env
->mmu_model
) {
1994 case POWERPC_MMU_SOFT_6xx
:
1995 case POWERPC_MMU_SOFT_74xx
:
1996 ppc6xx_tlb_invalidate_all(env
);
1998 case POWERPC_MMU_SOFT_4xx
:
1999 case POWERPC_MMU_SOFT_4xx_Z
:
2000 ppc4xx_tlb_invalidate_all(env
);
2002 case POWERPC_MMU_REAL
:
2003 cpu_abort(env_cpu(env
), "No TLB for PowerPC 4xx in real mode\n");
2005 case POWERPC_MMU_MPC8xx
:
2007 cpu_abort(env_cpu(env
), "MPC8xx MMU model is not implemented\n");
2009 case POWERPC_MMU_BOOKE
:
2010 tlb_flush(env_cpu(env
));
2012 case POWERPC_MMU_BOOKE206
:
2013 booke206_flush_tlb(env
, -1, 0);
2015 case POWERPC_MMU_32B
:
2016 case POWERPC_MMU_601
:
2017 env
->tlb_need_flush
= 0;
2018 tlb_flush(env_cpu(env
));
2022 cpu_abort(env_cpu(env
), "Unknown MMU model %x\n", env
->mmu_model
);
2027 void ppc_tlb_invalidate_one(CPUPPCState
*env
, target_ulong addr
)
2029 #if !defined(FLUSH_ALL_TLBS)
2030 addr
&= TARGET_PAGE_MASK
;
2031 #if defined(TARGET_PPC64)
2032 if (mmu_is_64bit(env
->mmu_model
)) {
2033 /* tlbie invalidate TLBs for all segments */
2035 * XXX: given the fact that there are too many segments to invalidate,
2036 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
2037 * we just invalidate all TLBs
2039 env
->tlb_need_flush
|= TLB_NEED_LOCAL_FLUSH
;
2041 #endif /* defined(TARGET_PPC64) */
2042 switch (env
->mmu_model
) {
2043 case POWERPC_MMU_SOFT_6xx
:
2044 case POWERPC_MMU_SOFT_74xx
:
2045 ppc6xx_tlb_invalidate_virt(env
, addr
, 0);
2046 if (env
->id_tlbs
== 1) {
2047 ppc6xx_tlb_invalidate_virt(env
, addr
, 1);
2050 case POWERPC_MMU_32B
:
2051 case POWERPC_MMU_601
:
2053 * Actual CPUs invalidate entire congruence classes based on
2054 * the geometry of their TLBs and some OSes take that into
2055 * account, we just mark the TLB to be flushed later (context
2056 * synchronizing event or sync instruction on 32-bit).
2058 env
->tlb_need_flush
|= TLB_NEED_LOCAL_FLUSH
;
2061 /* Should never reach here with other MMU models */
2065 ppc_tlb_invalidate_all(env
);
2069 /*****************************************************************************/
2070 /* Special registers manipulation */
2071 #if defined(TARGET_PPC64)
2072 void ppc_store_ptcr(CPUPPCState
*env
, target_ulong value
)
2074 PowerPCCPU
*cpu
= env_archcpu(env
);
2075 target_ulong ptcr_mask
= PTCR_PATB
| PTCR_PATS
;
2076 target_ulong patbsize
= value
& PTCR_PATS
;
2078 qemu_log_mask(CPU_LOG_MMU
, "%s: " TARGET_FMT_lx
"\n", __func__
, value
);
2081 assert(env
->mmu_model
& POWERPC_MMU_3_00
);
2083 if (value
& ~ptcr_mask
) {
2084 error_report("Invalid bits 0x"TARGET_FMT_lx
" set in PTCR",
2085 value
& ~ptcr_mask
);
2089 if (patbsize
> 24) {
2090 error_report("Invalid Partition Table size 0x" TARGET_FMT_lx
2091 " stored in PTCR", patbsize
);
2095 env
->spr
[SPR_PTCR
] = value
;
2098 #endif /* defined(TARGET_PPC64) */
2100 /* Segment registers load and store */
2101 target_ulong
helper_load_sr(CPUPPCState
*env
, target_ulong sr_num
)
2103 #if defined(TARGET_PPC64)
2104 if (mmu_is_64bit(env
->mmu_model
)) {
2109 return env
->sr
[sr_num
];
2112 void helper_store_sr(CPUPPCState
*env
, target_ulong srnum
, target_ulong value
)
2114 qemu_log_mask(CPU_LOG_MMU
,
2115 "%s: reg=%d " TARGET_FMT_lx
" " TARGET_FMT_lx
"\n", __func__
,
2116 (int)srnum
, value
, env
->sr
[srnum
]);
2117 #if defined(TARGET_PPC64)
2118 if (mmu_is_64bit(env
->mmu_model
)) {
2119 PowerPCCPU
*cpu
= env_archcpu(env
);
2120 uint64_t esid
, vsid
;
2123 esid
= ((uint64_t)(srnum
& 0xf) << 28) | SLB_ESID_V
;
2126 vsid
= (value
& 0xfffffff) << 12;
2128 vsid
|= ((value
>> 27) & 0xf) << 8;
2130 ppc_store_slb(cpu
, srnum
, esid
, vsid
);
2133 if (env
->sr
[srnum
] != value
) {
2134 env
->sr
[srnum
] = value
;
2136 * Invalidating 256MB of virtual memory in 4kB pages is way
2137 * longer than flushing the whole TLB.
2139 #if !defined(FLUSH_ALL_TLBS) && 0
2141 target_ulong page
, end
;
2142 /* Invalidate 256 MB of virtual memory */
2143 page
= (16 << 20) * srnum
;
2144 end
= page
+ (16 << 20);
2145 for (; page
!= end
; page
+= TARGET_PAGE_SIZE
) {
2146 tlb_flush_page(env_cpu(env
), page
);
2150 env
->tlb_need_flush
|= TLB_NEED_LOCAL_FLUSH
;
2155 /* TLB management */
2156 void helper_tlbia(CPUPPCState
*env
)
2158 ppc_tlb_invalidate_all(env
);
2161 void helper_tlbie(CPUPPCState
*env
, target_ulong addr
)
2163 ppc_tlb_invalidate_one(env
, addr
);
2166 void helper_tlbiva(CPUPPCState
*env
, target_ulong addr
)
2168 /* tlbiva instruction only exists on BookE */
2169 assert(env
->mmu_model
== POWERPC_MMU_BOOKE
);
2171 cpu_abort(env_cpu(env
), "BookE MMU model is not implemented\n");
2174 /* Software driven TLBs management */
2175 /* PowerPC 602/603 software TLB load instructions helpers */
2176 static void do_6xx_tlb(CPUPPCState
*env
, target_ulong new_EPN
, int is_code
)
2178 target_ulong RPN
, CMP
, EPN
;
2181 RPN
= env
->spr
[SPR_RPA
];
2183 CMP
= env
->spr
[SPR_ICMP
];
2184 EPN
= env
->spr
[SPR_IMISS
];
2186 CMP
= env
->spr
[SPR_DCMP
];
2187 EPN
= env
->spr
[SPR_DMISS
];
2189 way
= (env
->spr
[SPR_SRR1
] >> 17) & 1;
2190 (void)EPN
; /* avoid a compiler warning */
2191 LOG_SWTLB("%s: EPN " TARGET_FMT_lx
" " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
2192 " PTE1 " TARGET_FMT_lx
" way %d\n", __func__
, new_EPN
, EPN
, CMP
,
2194 /* Store this TLB */
2195 ppc6xx_tlb_store(env
, (uint32_t)(new_EPN
& TARGET_PAGE_MASK
),
2196 way
, is_code
, CMP
, RPN
);
2199 void helper_6xx_tlbd(CPUPPCState
*env
, target_ulong EPN
)
2201 do_6xx_tlb(env
, EPN
, 0);
2204 void helper_6xx_tlbi(CPUPPCState
*env
, target_ulong EPN
)
2206 do_6xx_tlb(env
, EPN
, 1);
2209 /* PowerPC 74xx software TLB load instructions helpers */
2210 static void do_74xx_tlb(CPUPPCState
*env
, target_ulong new_EPN
, int is_code
)
2212 target_ulong RPN
, CMP
, EPN
;
2215 RPN
= env
->spr
[SPR_PTELO
];
2216 CMP
= env
->spr
[SPR_PTEHI
];
2217 EPN
= env
->spr
[SPR_TLBMISS
] & ~0x3;
2218 way
= env
->spr
[SPR_TLBMISS
] & 0x3;
2219 (void)EPN
; /* avoid a compiler warning */
2220 LOG_SWTLB("%s: EPN " TARGET_FMT_lx
" " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
2221 " PTE1 " TARGET_FMT_lx
" way %d\n", __func__
, new_EPN
, EPN
, CMP
,
2223 /* Store this TLB */
2224 ppc6xx_tlb_store(env
, (uint32_t)(new_EPN
& TARGET_PAGE_MASK
),
2225 way
, is_code
, CMP
, RPN
);
2228 void helper_74xx_tlbd(CPUPPCState
*env
, target_ulong EPN
)
2230 do_74xx_tlb(env
, EPN
, 0);
2233 void helper_74xx_tlbi(CPUPPCState
*env
, target_ulong EPN
)
2235 do_74xx_tlb(env
, EPN
, 1);
2238 /*****************************************************************************/
2239 /* PowerPC 601 specific instructions (POWER bridge) */
2241 target_ulong
helper_rac(CPUPPCState
*env
, target_ulong addr
)
2245 target_ulong ret
= 0;
2248 * We don't have to generate many instances of this instruction,
2249 * as rac is supervisor only.
2251 * XXX: FIX THIS: Pretend we have no BAT
2253 nb_BATs
= env
->nb_BATs
;
2255 if (get_physical_address(env
, &ctx
, addr
, 0, ACCESS_INT
) == 0) {
2258 env
->nb_BATs
= nb_BATs
;
2262 static inline target_ulong
booke_tlb_to_page_size(int size
)
2264 return 1024 << (2 * size
);
2267 static inline int booke_page_size_to_tlb(target_ulong page_size
)
2271 switch (page_size
) {
2305 #if defined(TARGET_PPC64)
2306 case 0x000100000000ULL
:
2309 case 0x000400000000ULL
:
2312 case 0x001000000000ULL
:
2315 case 0x004000000000ULL
:
2318 case 0x010000000000ULL
:
2330 /* Helpers for 4xx TLB management */
2331 #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
2333 #define PPC4XX_TLBHI_V 0x00000040
2334 #define PPC4XX_TLBHI_E 0x00000020
2335 #define PPC4XX_TLBHI_SIZE_MIN 0
2336 #define PPC4XX_TLBHI_SIZE_MAX 7
2337 #define PPC4XX_TLBHI_SIZE_DEFAULT 1
2338 #define PPC4XX_TLBHI_SIZE_SHIFT 7
2339 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007
2341 #define PPC4XX_TLBLO_EX 0x00000200
2342 #define PPC4XX_TLBLO_WR 0x00000100
2343 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
2344 #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
2346 target_ulong
helper_4xx_tlbre_hi(CPUPPCState
*env
, target_ulong entry
)
2352 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2353 tlb
= &env
->tlb
.tlbe
[entry
];
2355 if (tlb
->prot
& PAGE_VALID
) {
2356 ret
|= PPC4XX_TLBHI_V
;
2358 size
= booke_page_size_to_tlb(tlb
->size
);
2359 if (size
< PPC4XX_TLBHI_SIZE_MIN
|| size
> PPC4XX_TLBHI_SIZE_MAX
) {
2360 size
= PPC4XX_TLBHI_SIZE_DEFAULT
;
2362 ret
|= size
<< PPC4XX_TLBHI_SIZE_SHIFT
;
2363 env
->spr
[SPR_40x_PID
] = tlb
->PID
;
2367 target_ulong
helper_4xx_tlbre_lo(CPUPPCState
*env
, target_ulong entry
)
2372 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2373 tlb
= &env
->tlb
.tlbe
[entry
];
2375 if (tlb
->prot
& PAGE_EXEC
) {
2376 ret
|= PPC4XX_TLBLO_EX
;
2378 if (tlb
->prot
& PAGE_WRITE
) {
2379 ret
|= PPC4XX_TLBLO_WR
;
2384 void helper_4xx_tlbwe_hi(CPUPPCState
*env
, target_ulong entry
,
2387 CPUState
*cs
= env_cpu(env
);
2389 target_ulong page
, end
;
2391 LOG_SWTLB("%s entry %d val " TARGET_FMT_lx
"\n", __func__
, (int)entry
,
2393 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2394 tlb
= &env
->tlb
.tlbe
[entry
];
2395 /* Invalidate previous TLB (if it's valid) */
2396 if (tlb
->prot
& PAGE_VALID
) {
2397 end
= tlb
->EPN
+ tlb
->size
;
2398 LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx
" end "
2399 TARGET_FMT_lx
"\n", __func__
, (int)entry
, tlb
->EPN
, end
);
2400 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
) {
2401 tlb_flush_page(cs
, page
);
2404 tlb
->size
= booke_tlb_to_page_size((val
>> PPC4XX_TLBHI_SIZE_SHIFT
)
2405 & PPC4XX_TLBHI_SIZE_MASK
);
2407 * We cannot handle TLB size < TARGET_PAGE_SIZE.
2408 * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
2410 if ((val
& PPC4XX_TLBHI_V
) && tlb
->size
< TARGET_PAGE_SIZE
) {
2411 cpu_abort(cs
, "TLB size " TARGET_FMT_lu
" < %u "
2412 "are not supported (%d)\n"
2413 "Please implement TARGET_PAGE_BITS_VARY\n",
2414 tlb
->size
, TARGET_PAGE_SIZE
, (int)((val
>> 7) & 0x7));
2416 tlb
->EPN
= val
& ~(tlb
->size
- 1);
2417 if (val
& PPC4XX_TLBHI_V
) {
2418 tlb
->prot
|= PAGE_VALID
;
2419 if (val
& PPC4XX_TLBHI_E
) {
2420 /* XXX: TO BE FIXED */
2422 "Little-endian TLB entries are not supported by now\n");
2425 tlb
->prot
&= ~PAGE_VALID
;
2427 tlb
->PID
= env
->spr
[SPR_40x_PID
]; /* PID */
2428 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx
" EPN " TARGET_FMT_lx
2429 " size " TARGET_FMT_lx
" prot %c%c%c%c PID %d\n", __func__
,
2430 (int)entry
, tlb
->RPN
, tlb
->EPN
, tlb
->size
,
2431 tlb
->prot
& PAGE_READ
? 'r' : '-',
2432 tlb
->prot
& PAGE_WRITE
? 'w' : '-',
2433 tlb
->prot
& PAGE_EXEC
? 'x' : '-',
2434 tlb
->prot
& PAGE_VALID
? 'v' : '-', (int)tlb
->PID
);
2435 /* Invalidate new TLB (if valid) */
2436 if (tlb
->prot
& PAGE_VALID
) {
2437 end
= tlb
->EPN
+ tlb
->size
;
2438 LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx
" end "
2439 TARGET_FMT_lx
"\n", __func__
, (int)entry
, tlb
->EPN
, end
);
2440 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
) {
2441 tlb_flush_page(cs
, page
);
2446 void helper_4xx_tlbwe_lo(CPUPPCState
*env
, target_ulong entry
,
2451 LOG_SWTLB("%s entry %i val " TARGET_FMT_lx
"\n", __func__
, (int)entry
,
2453 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2454 tlb
= &env
->tlb
.tlbe
[entry
];
2455 tlb
->attr
= val
& PPC4XX_TLBLO_ATTR_MASK
;
2456 tlb
->RPN
= val
& PPC4XX_TLBLO_RPN_MASK
;
2457 tlb
->prot
= PAGE_READ
;
2458 if (val
& PPC4XX_TLBLO_EX
) {
2459 tlb
->prot
|= PAGE_EXEC
;
2461 if (val
& PPC4XX_TLBLO_WR
) {
2462 tlb
->prot
|= PAGE_WRITE
;
2464 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx
" EPN " TARGET_FMT_lx
2465 " size " TARGET_FMT_lx
" prot %c%c%c%c PID %d\n", __func__
,
2466 (int)entry
, tlb
->RPN
, tlb
->EPN
, tlb
->size
,
2467 tlb
->prot
& PAGE_READ
? 'r' : '-',
2468 tlb
->prot
& PAGE_WRITE
? 'w' : '-',
2469 tlb
->prot
& PAGE_EXEC
? 'x' : '-',
2470 tlb
->prot
& PAGE_VALID
? 'v' : '-', (int)tlb
->PID
);
2473 target_ulong
helper_4xx_tlbsx(CPUPPCState
*env
, target_ulong address
)
2475 return ppcemb_tlb_search(env
, address
, env
->spr
[SPR_40x_PID
]);
2478 /* PowerPC 440 TLB management */
2479 void helper_440_tlbwe(CPUPPCState
*env
, uint32_t word
, target_ulong entry
,
2483 target_ulong EPN
, RPN
, size
;
2486 LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx
"\n",
2487 __func__
, word
, (int)entry
, value
);
2490 tlb
= &env
->tlb
.tlbe
[entry
];
2493 /* Just here to please gcc */
2495 EPN
= value
& 0xFFFFFC00;
2496 if ((tlb
->prot
& PAGE_VALID
) && EPN
!= tlb
->EPN
) {
2500 size
= booke_tlb_to_page_size((value
>> 4) & 0xF);
2501 if ((tlb
->prot
& PAGE_VALID
) && tlb
->size
< size
) {
2506 tlb
->attr
|= (value
>> 8) & 1;
2507 if (value
& 0x200) {
2508 tlb
->prot
|= PAGE_VALID
;
2510 if (tlb
->prot
& PAGE_VALID
) {
2511 tlb
->prot
&= ~PAGE_VALID
;
2515 tlb
->PID
= env
->spr
[SPR_440_MMUCR
] & 0x000000FF;
2516 if (do_flush_tlbs
) {
2517 tlb_flush(env_cpu(env
));
2521 RPN
= value
& 0xFFFFFC0F;
2522 if ((tlb
->prot
& PAGE_VALID
) && tlb
->RPN
!= RPN
) {
2523 tlb_flush(env_cpu(env
));
2528 tlb
->attr
= (tlb
->attr
& 0x1) | (value
& 0x0000FF00);
2529 tlb
->prot
= tlb
->prot
& PAGE_VALID
;
2531 tlb
->prot
|= PAGE_READ
<< 4;
2534 tlb
->prot
|= PAGE_WRITE
<< 4;
2537 tlb
->prot
|= PAGE_EXEC
<< 4;
2540 tlb
->prot
|= PAGE_READ
;
2543 tlb
->prot
|= PAGE_WRITE
;
2546 tlb
->prot
|= PAGE_EXEC
;
2552 target_ulong
helper_440_tlbre(CPUPPCState
*env
, uint32_t word
,
2560 tlb
= &env
->tlb
.tlbe
[entry
];
2563 /* Just here to please gcc */
2566 size
= booke_page_size_to_tlb(tlb
->size
);
2567 if (size
< 0 || size
> 0xF) {
2571 if (tlb
->attr
& 0x1) {
2574 if (tlb
->prot
& PAGE_VALID
) {
2577 env
->spr
[SPR_440_MMUCR
] &= ~0x000000FF;
2578 env
->spr
[SPR_440_MMUCR
] |= tlb
->PID
;
2584 ret
= tlb
->attr
& ~0x1;
2585 if (tlb
->prot
& (PAGE_READ
<< 4)) {
2588 if (tlb
->prot
& (PAGE_WRITE
<< 4)) {
2591 if (tlb
->prot
& (PAGE_EXEC
<< 4)) {
2594 if (tlb
->prot
& PAGE_READ
) {
2597 if (tlb
->prot
& PAGE_WRITE
) {
2600 if (tlb
->prot
& PAGE_EXEC
) {
2608 target_ulong
helper_440_tlbsx(CPUPPCState
*env
, target_ulong address
)
2610 return ppcemb_tlb_search(env
, address
, env
->spr
[SPR_440_MMUCR
] & 0xFF);
2613 /* PowerPC BookE 2.06 TLB management */
2615 static ppcmas_tlb_t
*booke206_cur_tlb(CPUPPCState
*env
)
2617 uint32_t tlbncfg
= 0;
2618 int esel
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_ESEL_MASK
) >> MAS0_ESEL_SHIFT
;
2619 int ea
= (env
->spr
[SPR_BOOKE_MAS2
] & MAS2_EPN_MASK
);
2622 tlb
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_TLBSEL_MASK
) >> MAS0_TLBSEL_SHIFT
;
2623 tlbncfg
= env
->spr
[SPR_BOOKE_TLB0CFG
+ tlb
];
2625 if ((tlbncfg
& TLBnCFG_HES
) && (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_HES
)) {
2626 cpu_abort(env_cpu(env
), "we don't support HES yet\n");
2629 return booke206_get_tlbm(env
, tlb
, ea
, esel
);
2632 void helper_booke_setpid(CPUPPCState
*env
, uint32_t pidn
, target_ulong pid
)
2634 env
->spr
[pidn
] = pid
;
2635 /* changing PIDs mean we're in a different address space now */
2636 tlb_flush(env_cpu(env
));
2639 void helper_booke_set_eplc(CPUPPCState
*env
, target_ulong val
)
2641 env
->spr
[SPR_BOOKE_EPLC
] = val
& EPID_MASK
;
2642 tlb_flush_by_mmuidx(env_cpu(env
), 1 << PPC_TLB_EPID_LOAD
);
2644 void helper_booke_set_epsc(CPUPPCState
*env
, target_ulong val
)
2646 env
->spr
[SPR_BOOKE_EPSC
] = val
& EPID_MASK
;
2647 tlb_flush_by_mmuidx(env_cpu(env
), 1 << PPC_TLB_EPID_STORE
);
2650 static inline void flush_page(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
)
2652 if (booke206_tlb_to_page_size(env
, tlb
) == TARGET_PAGE_SIZE
) {
2653 tlb_flush_page(env_cpu(env
), tlb
->mas2
& MAS2_EPN_MASK
);
2655 tlb_flush(env_cpu(env
));
2659 void helper_booke206_tlbwe(CPUPPCState
*env
)
2661 uint32_t tlbncfg
, tlbn
;
2663 uint32_t size_tlb
, size_ps
;
2667 switch (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_WQ_MASK
) {
2668 case MAS0_WQ_ALWAYS
:
2669 /* good to go, write that entry */
2672 /* XXX check if reserved */
2677 case MAS0_WQ_CLR_RSRV
:
2678 /* XXX clear entry */
2681 /* no idea what to do */
2685 if (((env
->spr
[SPR_BOOKE_MAS0
] & MAS0_ATSEL
) == MAS0_ATSEL_LRAT
) &&
2687 /* XXX we don't support direct LRAT setting yet */
2688 fprintf(stderr
, "cpu: don't support LRAT setting yet\n");
2692 tlbn
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_TLBSEL_MASK
) >> MAS0_TLBSEL_SHIFT
;
2693 tlbncfg
= env
->spr
[SPR_BOOKE_TLB0CFG
+ tlbn
];
2695 tlb
= booke206_cur_tlb(env
);
2698 raise_exception_err_ra(env
, POWERPC_EXCP_PROGRAM
,
2699 POWERPC_EXCP_INVAL
|
2700 POWERPC_EXCP_INVAL_INVAL
, GETPC());
2703 /* check that we support the targeted size */
2704 size_tlb
= (env
->spr
[SPR_BOOKE_MAS1
] & MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
2705 size_ps
= booke206_tlbnps(env
, tlbn
);
2706 if ((env
->spr
[SPR_BOOKE_MAS1
] & MAS1_VALID
) && (tlbncfg
& TLBnCFG_AVAIL
) &&
2707 !(size_ps
& (1 << size_tlb
))) {
2708 raise_exception_err_ra(env
, POWERPC_EXCP_PROGRAM
,
2709 POWERPC_EXCP_INVAL
|
2710 POWERPC_EXCP_INVAL_INVAL
, GETPC());
2714 cpu_abort(env_cpu(env
), "missing HV implementation\n");
2717 if (tlb
->mas1
& MAS1_VALID
) {
2719 * Invalidate the page in QEMU TLB if it was a valid entry.
2721 * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
2722 * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
2723 * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
2725 * "Note that when an L2 TLB entry is written, it may be displacing an
2726 * already valid entry in the same L2 TLB location (a victim). If a
2727 * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
2728 * TLB entry is automatically invalidated."
2730 flush_page(env
, tlb
);
2733 tlb
->mas7_3
= ((uint64_t)env
->spr
[SPR_BOOKE_MAS7
] << 32) |
2734 env
->spr
[SPR_BOOKE_MAS3
];
2735 tlb
->mas1
= env
->spr
[SPR_BOOKE_MAS1
];
2737 if ((env
->spr
[SPR_MMUCFG
] & MMUCFG_MAVN
) == MMUCFG_MAVN_V2
) {
2738 /* For TLB which has a fixed size TSIZE is ignored with MAV2 */
2739 booke206_fixed_size_tlbn(env
, tlbn
, tlb
);
2741 if (!(tlbncfg
& TLBnCFG_AVAIL
)) {
2742 /* force !AVAIL TLB entries to correct page size */
2743 tlb
->mas1
&= ~MAS1_TSIZE_MASK
;
2744 /* XXX can be configured in MMUCSR0 */
2745 tlb
->mas1
|= (tlbncfg
& TLBnCFG_MINSIZE
) >> 12;
2749 /* Make a mask from TLB size to discard invalid bits in EPN field */
2750 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
2751 /* Add a mask for page attributes */
2752 mask
|= MAS2_ACM
| MAS2_VLE
| MAS2_W
| MAS2_I
| MAS2_M
| MAS2_G
| MAS2_E
;
2756 * Executing a tlbwe instruction in 32-bit mode will set bits
2757 * 0:31 of the TLB EPN field to zero.
2762 tlb
->mas2
= env
->spr
[SPR_BOOKE_MAS2
] & mask
;
2764 if (!(tlbncfg
& TLBnCFG_IPROT
)) {
2765 /* no IPROT supported by TLB */
2766 tlb
->mas1
&= ~MAS1_IPROT
;
2769 flush_page(env
, tlb
);
/*
 * Mirror a TLB entry back into the MAS0..MAS7 SPRs — the reverse of a
 * tlbwe.  Used by tlbre/tlbsx so guest software can inspect the entry.
 */
static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    int tlbn = booke206_tlbm_to_tlbn(env, tlb);
    int way = booke206_tlbm_to_way(env, tlb);

    /* MAS0: TLB array selector, way (ESEL) and the next-victim hint (NV) */
    env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;

    env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
    env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
    /*
     * mas7_3 stores the 64-bit RPN/permission word; the low half lands in
     * MAS3 (assignment truncates) and the high half in MAS7.
     */
    env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
    env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
}
/* tlbre: read the TLB entry selected by MAS0/MAS2 into the MAS registers. */
void helper_booke206_tlbre(CPUPPCState *env)
{
    ppcmas_tlb_t *tlb = NULL;

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        /* Out-of-range selection: report an invalid (all-zero) MAS1. */
        env->spr[SPR_BOOKE_MAS1] = 0;
    } else {
        booke206_tlb_to_mas(env, tlb);
    }
}
/*
 * tlbsx: search all TLB arrays for an entry translating @address under the
 * search PID/AS held in MAS6.  On a hit the entry is copied into the MAS
 * registers; on a miss the MAS registers are loaded with the defaults from
 * MAS4 plus the next-victim (round-robin) way, ready for a tlbwe refill.
 */
void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    hwaddr raddr;
    uint32_t spid, sas;

    /* Search PID and address-space bit come from MAS6, not the live PID. */
    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);

            if (!tlb) {
                continue;
            }

            /* Non-zero return means no match for this EA/PID. */
            if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }

            /* The translation space (TS) must also match MAS6[SAS]. */
            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }

            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    /* Propagate the searched PID (MAS6 upper half) into MAS1[TID]. */
    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    /* NOTE(review): round-robin is sized by TLB0's way count only. */
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
/*
 * Clear the VALID bit of every way of TLB array @tlbn that maps effective
 * address @ea, unless the entry is invalidation-protected (IPROT).
 * Only the guest-visible entry is touched; the caller is responsible for
 * flushing the QEMU TLB.
 */
static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              vaddr ea)
{
    int i;
    int ways = booke206_tlb_ways(env, tlbn);
    target_ulong mask;

    for (i = 0; i < ways; i++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
        if (!tlb) {
            continue;
        }
        /* Compare EPNs only down to the entry's own page-size granularity. */
        mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
            !(tlb->mas1 & MAS1_IPROT)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}
/*
 * tlbivax: TLB invalidate by effective address, broadcast to all CPUs.
 * Control bits are encoded in the EA itself: bit 2 (0x4) selects
 * "invalidate all", bit 3 (0x8) selects TLB1 instead of TLB0.
 */
void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
    CPUState *cs;

    if (address & 0x4) {
        /* flush all entries */
        if (address & 0x8) {
            /* flush all of TLB1 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            /* flush all of TLB0 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    if (address & 0x8) {
        /* flush TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        CPU_FOREACH(cs) {
            /* TLB1 entries can span many pages, so drop everything. */
            tlb_flush(cs);
        }
    } else {
        /* flush TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        CPU_FOREACH(cs) {
            tlb_flush_page(cs, address & MAS2_EPN_MASK);
        }
    }
}
/* tlbilx T=0: invalidate every non-protected entry in all TLB arrays. */
void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
{
    /* XXX missing LPID handling */
    booke206_flush_tlb(env, -1, 1);
}
/*
 * tlbilx T=1: invalidate all non-protected entries whose TID matches the
 * search PID in MAS6, across every TLB array.
 */
void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
    int i, j;
    /* MAS6_SPID is already positioned like MAS1_TID, so compare directly. */
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    ppcmas_tlb_t *tlb = env->tlb.tlbm;
    int tlb_size;

    /* XXX missing LPID handling */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        /* The per-array TLBs are laid out back to back in tlbm[]. */
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env_cpu(env));
}
/*
 * tlbilx T=3: invalidate the entry (if any) matching effective address
 * @address together with the PID/space/guest-state/indirect attributes
 * held in MAS5/MAS6.
 */
void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode then */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            /* Skip entries that miss on EA/PID, are protected, or whose
             * indirect/guest-state attributes do not match the request. */
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            /* mav2 is hard-wired false above, so the size match is a stub. */
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env_cpu(env));
}
/*
 * tlbflush helper: flush TLB1 and/or TLB0 according to the @type bit mask
 * passed from the translated instruction.
 */
void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
{
    int flags = 0;

    if (type & 2) {
        flags |= BOOKE206_FLUSH_TLB1;
    }

    if (type & 4) {
        flags |= BOOKE206_FLUSH_TLB0;
    }

    booke206_flush_tlb(env, flags, 1);
}
/* Run any TLB flush pending for this vCPU only (global=false). */
void helper_check_tlb_flush_local(CPUPPCState *env)
{
    check_tlb_flush(env, false);
}
/* Run pending TLB flushes, including broadcast/global ones (global=true). */
void helper_check_tlb_flush_global(CPUPPCState *env)
{
    check_tlb_flush(env, true);
}
2997 /*****************************************************************************/
2999 bool ppc_cpu_tlb_fill(CPUState
*cs
, vaddr addr
, int size
,
3000 MMUAccessType access_type
, int mmu_idx
,
3001 bool probe
, uintptr_t retaddr
)
3003 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
3004 PowerPCCPUClass
*pcc
= POWERPC_CPU_GET_CLASS(cs
);
3005 CPUPPCState
*env
= &cpu
->env
;
3008 if (pcc
->handle_mmu_fault
) {
3009 ret
= pcc
->handle_mmu_fault(cpu
, addr
, access_type
, mmu_idx
);
3011 ret
= cpu_ppc_handle_mmu_fault(env
, addr
, access_type
, mmu_idx
);
3013 if (unlikely(ret
!= 0)) {
3017 raise_exception_err_ra(env
, cs
->exception_index
, env
->error_code
,