2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
23 #include "exec/helper-proto.h"
24 #include "sysemu/kvm.h"
26 #include "mmu-hash64.h"
27 #include "mmu-hash32.h"
28 #include "exec/exec-all.h"
29 #include "exec/cpu_ldst.h"
31 #include "helper_regs.h"
32 #include "qemu/error-report.h"
33 #include "qemu/main-loop.h"
34 #include "qemu/qemu-print.h"
35 #include "mmu-book3s-v3.h"
36 #include "mmu-radix64.h"
38 /* #define DEBUG_MMU */
39 /* #define DEBUG_BATS */
40 /* #define DEBUG_SOFTWARE_TLB */
41 /* #define DUMP_PAGE_TABLES */
42 /* #define FLUSH_ALL_TLBS */
45 # define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
47 # define LOG_MMU_STATE(cpu) do { } while (0)
50 #ifdef DEBUG_SOFTWARE_TLB
51 # define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
53 # define LOG_SWTLB(...) do { } while (0)
57 # define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
59 # define LOG_BATS(...) do { } while (0)
62 /*****************************************************************************/
63 /* PowerPC MMU emulation */
65 /* Context used internally during MMU translations */
66 typedef struct mmu_ctx_t mmu_ctx_t
;
68 hwaddr raddr
; /* Real address */
69 hwaddr eaddr
; /* Effective address */
70 int prot
; /* Protection bits */
71 hwaddr hash
[2]; /* Pagetable hash values */
72 target_ulong ptem
; /* Virtual segment ID | API */
73 int key
; /* Access key */
74 int nx
; /* Non-execute area */
77 /* Common routines used by software and hardware TLBs emulation */
78 static inline int pte_is_valid(target_ulong pte0
)
80 return pte0
& 0x80000000 ? 1 : 0;
83 static inline void pte_invalidate(target_ulong
*pte0
)
88 #define PTE_PTEM_MASK 0x7FFFFFBF
89 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
91 static int pp_check(int key
, int pp
, int nx
)
95 /* Compute access rights */
102 access
|= PAGE_WRITE
;
118 access
= PAGE_READ
| PAGE_WRITE
;
129 static int check_prot(int prot
, int rw
, int access_type
)
133 if (access_type
== ACCESS_CODE
) {
134 if (prot
& PAGE_EXEC
) {
140 if (prot
& PAGE_WRITE
) {
146 if (prot
& PAGE_READ
) {
156 static inline int ppc6xx_tlb_pte_check(mmu_ctx_t
*ctx
, target_ulong pte0
,
157 target_ulong pte1
, int h
,
160 target_ulong ptem
, mmask
;
161 int access
, ret
, pteh
, ptev
, pp
;
164 /* Check validity and table match */
165 ptev
= pte_is_valid(pte0
);
166 pteh
= (pte0
>> 6) & 1;
167 if (ptev
&& h
== pteh
) {
168 /* Check vsid & api */
169 ptem
= pte0
& PTE_PTEM_MASK
;
170 mmask
= PTE_CHECK_MASK
;
171 pp
= pte1
& 0x00000003;
172 if (ptem
== ctx
->ptem
) {
173 if (ctx
->raddr
!= (hwaddr
)-1ULL) {
174 /* all matches should have equal RPN, WIMG & PP */
175 if ((ctx
->raddr
& mmask
) != (pte1
& mmask
)) {
176 qemu_log_mask(CPU_LOG_MMU
, "Bad RPN/WIMG/PP\n");
180 /* Compute access rights */
181 access
= pp_check(ctx
->key
, pp
, ctx
->nx
);
182 /* Keep the matching PTE information */
185 ret
= check_prot(ctx
->prot
, rw
, type
);
188 qemu_log_mask(CPU_LOG_MMU
, "PTE access granted !\n");
190 /* Access right violation */
191 qemu_log_mask(CPU_LOG_MMU
, "PTE access rejected\n");
199 static int pte_update_flags(mmu_ctx_t
*ctx
, target_ulong
*pte1p
,
204 /* Update page flags */
205 if (!(*pte1p
& 0x00000100)) {
206 /* Update accessed flag */
207 *pte1p
|= 0x00000100;
210 if (!(*pte1p
& 0x00000080)) {
211 if (rw
== 1 && ret
== 0) {
212 /* Update changed flag */
213 *pte1p
|= 0x00000080;
216 /* Force page fault for first write access */
217 ctx
->prot
&= ~PAGE_WRITE
;
224 /* Software driven TLB helpers */
225 static inline int ppc6xx_tlb_getnum(CPUPPCState
*env
, target_ulong eaddr
,
226 int way
, int is_code
)
230 /* Select TLB num in a way from address */
231 nr
= (eaddr
>> TARGET_PAGE_BITS
) & (env
->tlb_per_way
- 1);
233 nr
+= env
->tlb_per_way
* way
;
234 /* 6xx have separate TLBs for instructions and data */
235 if (is_code
&& env
->id_tlbs
== 1) {
242 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState
*env
)
247 /* LOG_SWTLB("Invalidate all TLBs\n"); */
248 /* Invalidate all defined software TLB */
250 if (env
->id_tlbs
== 1) {
253 for (nr
= 0; nr
< max
; nr
++) {
254 tlb
= &env
->tlb
.tlb6
[nr
];
255 pte_invalidate(&tlb
->pte0
);
257 tlb_flush(env_cpu(env
));
260 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState
*env
,
262 int is_code
, int match_epn
)
264 #if !defined(FLUSH_ALL_TLBS)
265 CPUState
*cs
= env_cpu(env
);
269 /* Invalidate ITLB + DTLB, all ways */
270 for (way
= 0; way
< env
->nb_ways
; way
++) {
271 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
, is_code
);
272 tlb
= &env
->tlb
.tlb6
[nr
];
273 if (pte_is_valid(tlb
->pte0
) && (match_epn
== 0 || eaddr
== tlb
->EPN
)) {
274 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx
"\n", nr
,
276 pte_invalidate(&tlb
->pte0
);
277 tlb_flush_page(cs
, tlb
->EPN
);
281 /* XXX: PowerPC specification say this is valid as well */
282 ppc6xx_tlb_invalidate_all(env
);
286 static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState
*env
,
287 target_ulong eaddr
, int is_code
)
289 ppc6xx_tlb_invalidate_virt2(env
, eaddr
, is_code
, 0);
292 static void ppc6xx_tlb_store(CPUPPCState
*env
, target_ulong EPN
, int way
,
293 int is_code
, target_ulong pte0
, target_ulong pte1
)
298 nr
= ppc6xx_tlb_getnum(env
, EPN
, way
, is_code
);
299 tlb
= &env
->tlb
.tlb6
[nr
];
300 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
301 " PTE1 " TARGET_FMT_lx
"\n", nr
, env
->nb_tlb
, EPN
, pte0
, pte1
);
302 /* Invalidate any pending reference in QEMU for this virtual address */
303 ppc6xx_tlb_invalidate_virt2(env
, EPN
, is_code
, 1);
307 /* Store last way for LRU mechanism */
311 static inline int ppc6xx_tlb_check(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
312 target_ulong eaddr
, int rw
, int access_type
)
319 ret
= -1; /* No TLB found */
320 for (way
= 0; way
< env
->nb_ways
; way
++) {
321 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
,
322 access_type
== ACCESS_CODE
? 1 : 0);
323 tlb
= &env
->tlb
.tlb6
[nr
];
324 /* This test "emulates" the PTE index match for hardware TLBs */
325 if ((eaddr
& TARGET_PAGE_MASK
) != tlb
->EPN
) {
326 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx
" " TARGET_FMT_lx
327 "] <> " TARGET_FMT_lx
"\n", nr
, env
->nb_tlb
,
328 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
329 tlb
->EPN
, tlb
->EPN
+ TARGET_PAGE_SIZE
, eaddr
);
332 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx
" <> " TARGET_FMT_lx
" "
333 TARGET_FMT_lx
" %c %c\n", nr
, env
->nb_tlb
,
334 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
335 tlb
->EPN
, eaddr
, tlb
->pte1
,
336 rw
? 'S' : 'L', access_type
== ACCESS_CODE
? 'I' : 'D');
337 switch (ppc6xx_tlb_pte_check(ctx
, tlb
->pte0
, tlb
->pte1
,
338 0, rw
, access_type
)) {
340 /* TLB inconsistency */
343 /* Access violation */
354 * XXX: we should go on looping to check all TLBs
355 * consistency but we can speed-up the whole thing as
356 * the result would be undefined if TLBs are not
366 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx
" prot=%01x ret=%d\n",
367 ctx
->raddr
& TARGET_PAGE_MASK
, ctx
->prot
, ret
);
368 /* Update page flags */
369 pte_update_flags(ctx
, &env
->tlb
.tlb6
[best
].pte1
, ret
, rw
);
375 /* Perform BAT hit & translation */
376 static inline void bat_size_prot(CPUPPCState
*env
, target_ulong
*blp
,
377 int *validp
, int *protp
, target_ulong
*BATu
,
383 bl
= (*BATu
& 0x00001FFC) << 15;
386 if (((msr_pr
== 0) && (*BATu
& 0x00000002)) ||
387 ((msr_pr
!= 0) && (*BATu
& 0x00000001))) {
389 pp
= *BATl
& 0x00000003;
391 prot
= PAGE_READ
| PAGE_EXEC
;
402 static int get_bat_6xx_tlb(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
403 target_ulong
virtual, int rw
, int type
)
405 target_ulong
*BATlt
, *BATut
, *BATu
, *BATl
;
406 target_ulong BEPIl
, BEPIu
, bl
;
410 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx
"\n", __func__
,
411 type
== ACCESS_CODE
? 'I' : 'D', virtual);
414 BATlt
= env
->IBAT
[1];
415 BATut
= env
->IBAT
[0];
418 BATlt
= env
->DBAT
[1];
419 BATut
= env
->DBAT
[0];
422 for (i
= 0; i
< env
->nb_BATs
; i
++) {
425 BEPIu
= *BATu
& 0xF0000000;
426 BEPIl
= *BATu
& 0x0FFE0000;
427 bat_size_prot(env
, &bl
, &valid
, &prot
, BATu
, BATl
);
428 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx
429 " BATl " TARGET_FMT_lx
"\n", __func__
,
430 type
== ACCESS_CODE
? 'I' : 'D', i
, virtual, *BATu
, *BATl
);
431 if ((virtual & 0xF0000000) == BEPIu
&&
432 ((virtual & 0x0FFE0000) & ~bl
) == BEPIl
) {
435 /* Get physical address */
436 ctx
->raddr
= (*BATl
& 0xF0000000) |
437 ((virtual & 0x0FFE0000 & bl
) | (*BATl
& 0x0FFE0000)) |
438 (virtual & 0x0001F000);
439 /* Compute access rights */
441 ret
= check_prot(ctx
->prot
, rw
, type
);
443 LOG_BATS("BAT %d match: r " TARGET_FMT_plx
" prot=%c%c\n",
444 i
, ctx
->raddr
, ctx
->prot
& PAGE_READ
? 'R' : '-',
445 ctx
->prot
& PAGE_WRITE
? 'W' : '-');
452 #if defined(DEBUG_BATS)
453 if (qemu_log_enabled()) {
454 LOG_BATS("no BAT match for " TARGET_FMT_lx
":\n", virtual);
455 for (i
= 0; i
< 4; i
++) {
458 BEPIu
= *BATu
& 0xF0000000;
459 BEPIl
= *BATu
& 0x0FFE0000;
460 bl
= (*BATu
& 0x00001FFC) << 15;
461 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx
462 " BATl " TARGET_FMT_lx
"\n\t" TARGET_FMT_lx
" "
463 TARGET_FMT_lx
" " TARGET_FMT_lx
"\n",
464 __func__
, type
== ACCESS_CODE
? 'I' : 'D', i
, virtual,
465 *BATu
, *BATl
, BEPIu
, BEPIl
, bl
);
474 /* Perform segment based translation */
475 static inline int get_segment_6xx_tlb(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
476 target_ulong eaddr
, int rw
, int type
)
478 PowerPCCPU
*cpu
= env_archcpu(env
);
481 int ds
, pr
, target_page_bits
;
483 target_ulong sr
, pgidx
;
488 sr
= env
->sr
[eaddr
>> 28];
489 ctx
->key
= (((sr
& 0x20000000) && (pr
!= 0)) ||
490 ((sr
& 0x40000000) && (pr
== 0))) ? 1 : 0;
491 ds
= sr
& 0x80000000 ? 1 : 0;
492 ctx
->nx
= sr
& 0x10000000 ? 1 : 0;
493 vsid
= sr
& 0x00FFFFFF;
494 target_page_bits
= TARGET_PAGE_BITS
;
495 qemu_log_mask(CPU_LOG_MMU
,
496 "Check segment v=" TARGET_FMT_lx
" %d " TARGET_FMT_lx
497 " nip=" TARGET_FMT_lx
" lr=" TARGET_FMT_lx
498 " ir=%d dr=%d pr=%d %d t=%d\n",
499 eaddr
, (int)(eaddr
>> 28), sr
, env
->nip
, env
->lr
, (int)msr_ir
,
500 (int)msr_dr
, pr
!= 0 ? 1 : 0, rw
, type
);
501 pgidx
= (eaddr
& ~SEGMENT_MASK_256M
) >> target_page_bits
;
503 ctx
->ptem
= (vsid
<< 7) | (pgidx
>> 10);
505 qemu_log_mask(CPU_LOG_MMU
,
506 "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx
"\n",
507 ctx
->key
, ds
, ctx
->nx
, vsid
);
510 /* Check if instruction fetch is allowed, if needed */
511 if (type
!= ACCESS_CODE
|| ctx
->nx
== 0) {
512 /* Page address translation */
513 qemu_log_mask(CPU_LOG_MMU
, "htab_base " TARGET_FMT_plx
514 " htab_mask " TARGET_FMT_plx
515 " hash " TARGET_FMT_plx
"\n",
516 ppc_hash32_hpt_base(cpu
), ppc_hash32_hpt_mask(cpu
), hash
);
518 ctx
->hash
[1] = ~hash
;
520 /* Initialize real address with an invalid value */
521 ctx
->raddr
= (hwaddr
)-1ULL;
522 /* Software TLB search */
523 ret
= ppc6xx_tlb_check(env
, ctx
, eaddr
, rw
, type
);
524 #if defined(DUMP_PAGE_TABLES)
525 if (qemu_loglevel_mask(CPU_LOG_MMU
)) {
526 CPUState
*cs
= env_cpu(env
);
528 uint32_t a0
, a1
, a2
, a3
;
530 qemu_log("Page table: " TARGET_FMT_plx
" len " TARGET_FMT_plx
531 "\n", ppc_hash32_hpt_base(cpu
),
532 ppc_hash32_hpt_mask(env
) + 0x80);
533 for (curaddr
= ppc_hash32_hpt_base(cpu
);
534 curaddr
< (ppc_hash32_hpt_base(cpu
)
535 + ppc_hash32_hpt_mask(cpu
) + 0x80);
537 a0
= ldl_phys(cs
->as
, curaddr
);
538 a1
= ldl_phys(cs
->as
, curaddr
+ 4);
539 a2
= ldl_phys(cs
->as
, curaddr
+ 8);
540 a3
= ldl_phys(cs
->as
, curaddr
+ 12);
541 if (a0
!= 0 || a1
!= 0 || a2
!= 0 || a3
!= 0) {
542 qemu_log(TARGET_FMT_plx
": %08x %08x %08x %08x\n",
543 curaddr
, a0
, a1
, a2
, a3
);
549 qemu_log_mask(CPU_LOG_MMU
, "No access allowed\n");
555 qemu_log_mask(CPU_LOG_MMU
, "direct store...\n");
556 /* Direct-store segment : absolutely *BUGGY* for now */
559 * Direct-store implies a 32-bit MMU.
560 * Check the Segment Register's bus unit ID (BUID).
562 sr
= env
->sr
[eaddr
>> 28];
563 if ((sr
& 0x1FF00000) >> 20 == 0x07f) {
565 * Memory-forced I/O controller interface access
567 * If T=1 and BUID=x'07F', the 601 performs a memory
568 * access to SR[28-31] LA[4-31], bypassing all protection
571 ctx
->raddr
= ((sr
& 0xF) << 28) | (eaddr
& 0x0FFFFFFF);
572 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
578 /* Integer load/store : only access allowed */
581 /* No code fetch is allowed in direct-store areas */
584 /* Floating point load/store */
587 /* lwarx, ldarx or srwcx. */
591 * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
593 * Should make the instruction do no-op. As it already do
594 * no-op, it's quite easy :-)
602 qemu_log_mask(CPU_LOG_MMU
, "ERROR: instruction should not need "
603 "address translation\n");
606 if ((rw
== 1 || ctx
->key
!= 1) && (rw
== 0 || ctx
->key
!= 0)) {
617 /* Generic TLB check function for embedded PowerPC implementations */
618 static int ppcemb_tlb_check(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
620 target_ulong address
, uint32_t pid
, int ext
,
625 /* Check valid flag */
626 if (!(tlb
->prot
& PAGE_VALID
)) {
629 mask
= ~(tlb
->size
- 1);
630 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx
" PID %u <=> " TARGET_FMT_lx
631 " " TARGET_FMT_lx
" %u %x\n", __func__
, i
, address
, pid
, tlb
->EPN
,
632 mask
, (uint32_t)tlb
->PID
, tlb
->prot
);
634 if (tlb
->PID
!= 0 && tlb
->PID
!= pid
) {
637 /* Check effective address */
638 if ((address
& mask
) != tlb
->EPN
) {
641 *raddrp
= (tlb
->RPN
& mask
) | (address
& ~mask
);
643 /* Extend the physical address to 36 bits */
644 *raddrp
|= (uint64_t)(tlb
->RPN
& 0xF) << 32;
650 /* Generic TLB search function for PowerPC embedded implementations */
651 static int ppcemb_tlb_search(CPUPPCState
*env
, target_ulong address
,
658 /* Default return value is no match */
660 for (i
= 0; i
< env
->nb_tlb
; i
++) {
661 tlb
= &env
->tlb
.tlbe
[i
];
662 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
, pid
, 0, i
) == 0) {
671 /* Helpers specific to PowerPC 40x implementations */
672 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState
*env
)
677 for (i
= 0; i
< env
->nb_tlb
; i
++) {
678 tlb
= &env
->tlb
.tlbe
[i
];
679 tlb
->prot
&= ~PAGE_VALID
;
681 tlb_flush(env_cpu(env
));
684 static int mmu40x_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
685 target_ulong address
, int rw
,
690 int i
, ret
, zsel
, zpr
, pr
;
693 raddr
= (hwaddr
)-1ULL;
695 for (i
= 0; i
< env
->nb_tlb
; i
++) {
696 tlb
= &env
->tlb
.tlbe
[i
];
697 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
,
698 env
->spr
[SPR_40x_PID
], 0, i
) < 0) {
701 zsel
= (tlb
->attr
>> 4) & 0xF;
702 zpr
= (env
->spr
[SPR_40x_ZPR
] >> (30 - (2 * zsel
))) & 0x3;
703 LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
704 __func__
, i
, zsel
, zpr
, rw
, tlb
->attr
);
705 /* Check execute enable bit */
713 /* All accesses granted */
714 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
719 /* Raise Zone protection fault. */
720 env
->spr
[SPR_40x_ESR
] = 1 << 22;
728 /* Check from TLB entry */
729 ctx
->prot
= tlb
->prot
;
730 ret
= check_prot(ctx
->prot
, rw
, access_type
);
732 env
->spr
[SPR_40x_ESR
] = 0;
738 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
739 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
744 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
745 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
750 void store_40x_sler(CPUPPCState
*env
, uint32_t val
)
752 /* XXX: TO BE FIXED */
753 if (val
!= 0x00000000) {
754 cpu_abort(env_cpu(env
),
755 "Little-endian regions are not supported by now\n");
757 env
->spr
[SPR_405_SLER
] = val
;
760 static inline int mmubooke_check_tlb(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
761 hwaddr
*raddr
, int *prot
,
762 target_ulong address
, int rw
,
763 int access_type
, int i
)
767 if (ppcemb_tlb_check(env
, tlb
, raddr
, address
,
768 env
->spr
[SPR_BOOKE_PID
],
769 !env
->nb_pids
, i
) >= 0) {
773 if (env
->spr
[SPR_BOOKE_PID1
] &&
774 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
775 env
->spr
[SPR_BOOKE_PID1
], 0, i
) >= 0) {
779 if (env
->spr
[SPR_BOOKE_PID2
] &&
780 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
781 env
->spr
[SPR_BOOKE_PID2
], 0, i
) >= 0) {
785 LOG_SWTLB("%s: TLB entry not found\n", __func__
);
791 prot2
= tlb
->prot
& 0xF;
793 prot2
= (tlb
->prot
>> 4) & 0xF;
796 /* Check the address space */
797 if (access_type
== ACCESS_CODE
) {
798 if (msr_ir
!= (tlb
->attr
& 1)) {
799 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
804 if (prot2
& PAGE_EXEC
) {
805 LOG_SWTLB("%s: good TLB!\n", __func__
);
809 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__
, prot2
);
812 if (msr_dr
!= (tlb
->attr
& 1)) {
813 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
818 if ((!rw
&& prot2
& PAGE_READ
) || (rw
&& (prot2
& PAGE_WRITE
))) {
819 LOG_SWTLB("%s: found TLB!\n", __func__
);
823 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__
, prot2
);
830 static int mmubooke_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
831 target_ulong address
, int rw
,
839 raddr
= (hwaddr
)-1ULL;
840 for (i
= 0; i
< env
->nb_tlb
; i
++) {
841 tlb
= &env
->tlb
.tlbe
[i
];
842 ret
= mmubooke_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
, rw
,
851 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
852 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
855 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
856 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
862 static void booke206_flush_tlb(CPUPPCState
*env
, int flags
,
863 const int check_iprot
)
867 ppcmas_tlb_t
*tlb
= env
->tlb
.tlbm
;
869 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
870 if (flags
& (1 << i
)) {
871 tlb_size
= booke206_tlb_size(env
, i
);
872 for (j
= 0; j
< tlb_size
; j
++) {
873 if (!check_iprot
|| !(tlb
[j
].mas1
& MAS1_IPROT
)) {
874 tlb
[j
].mas1
&= ~MAS1_VALID
;
878 tlb
+= booke206_tlb_size(env
, i
);
881 tlb_flush(env_cpu(env
));
884 static hwaddr
booke206_tlb_to_page_size(CPUPPCState
*env
,
889 tlbm_size
= (tlb
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
891 return 1024ULL << tlbm_size
;
894 /* TLB check function for MAS based SoftTLBs */
895 static int ppcmas_tlb_check(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
896 hwaddr
*raddrp
, target_ulong address
,
903 /* In 32bit mode we can only address 32bit EAs */
904 address
= (uint32_t)address
;
907 /* Check valid flag */
908 if (!(tlb
->mas1
& MAS1_VALID
)) {
912 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
913 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx
" PID=0x%x MAS1=0x%x MAS2=0x%"
914 PRIx64
" mask=0x%" HWADDR_PRIx
" MAS7_3=0x%" PRIx64
" MAS8=0x%"
915 PRIx32
"\n", __func__
, address
, pid
, tlb
->mas1
, tlb
->mas2
, mask
,
916 tlb
->mas7_3
, tlb
->mas8
);
919 tlb_pid
= (tlb
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
;
920 if (tlb_pid
!= 0 && tlb_pid
!= pid
) {
924 /* Check effective address */
925 if ((address
& mask
) != (tlb
->mas2
& MAS2_EPN_MASK
)) {
930 *raddrp
= (tlb
->mas7_3
& mask
) | (address
& ~mask
);
936 static bool is_epid_mmu(int mmu_idx
)
938 return mmu_idx
== PPC_TLB_EPID_STORE
|| mmu_idx
== PPC_TLB_EPID_LOAD
;
941 static uint32_t mmubooke206_esr(int mmu_idx
, bool rw
)
947 if (is_epid_mmu(mmu_idx
)) {
954 * Get EPID register given the mmu_idx. If this is regular load,
955 * construct the EPID access bits from current processor state
957 * Get the effective AS and PR bits and the PID. The PID is returned
958 * only if EPID load is requested, otherwise the caller must detect
959 * the correct EPID. Return true if valid EPID is returned.
961 static bool mmubooke206_get_as(CPUPPCState
*env
,
962 int mmu_idx
, uint32_t *epid_out
,
963 bool *as_out
, bool *pr_out
)
965 if (is_epid_mmu(mmu_idx
)) {
967 if (mmu_idx
== PPC_TLB_EPID_STORE
) {
968 epidr
= env
->spr
[SPR_BOOKE_EPSC
];
970 epidr
= env
->spr
[SPR_BOOKE_EPLC
];
972 *epid_out
= (epidr
& EPID_EPID
) >> EPID_EPID_SHIFT
;
973 *as_out
= !!(epidr
& EPID_EAS
);
974 *pr_out
= !!(epidr
& EPID_EPR
);
983 /* Check if the tlb found by hashing really matches */
984 static int mmubooke206_check_tlb(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
985 hwaddr
*raddr
, int *prot
,
986 target_ulong address
, int rw
,
987 int access_type
, int mmu_idx
)
993 bool use_epid
= mmubooke206_get_as(env
, mmu_idx
, &epid
, &as
, &pr
);
996 if (ppcmas_tlb_check(env
, tlb
, raddr
, address
,
997 env
->spr
[SPR_BOOKE_PID
]) >= 0) {
1001 if (env
->spr
[SPR_BOOKE_PID1
] &&
1002 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1003 env
->spr
[SPR_BOOKE_PID1
]) >= 0) {
1007 if (env
->spr
[SPR_BOOKE_PID2
] &&
1008 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1009 env
->spr
[SPR_BOOKE_PID2
]) >= 0) {
1013 if (ppcmas_tlb_check(env
, tlb
, raddr
, address
, epid
) >= 0) {
1018 LOG_SWTLB("%s: TLB entry not found\n", __func__
);
1024 if (tlb
->mas7_3
& MAS3_UR
) {
1027 if (tlb
->mas7_3
& MAS3_UW
) {
1028 prot2
|= PAGE_WRITE
;
1030 if (tlb
->mas7_3
& MAS3_UX
) {
1034 if (tlb
->mas7_3
& MAS3_SR
) {
1037 if (tlb
->mas7_3
& MAS3_SW
) {
1038 prot2
|= PAGE_WRITE
;
1040 if (tlb
->mas7_3
& MAS3_SX
) {
1045 /* Check the address space and permissions */
1046 if (access_type
== ACCESS_CODE
) {
1047 /* There is no way to fetch code using epid load */
1049 if (msr_ir
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
1050 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1055 if (prot2
& PAGE_EXEC
) {
1056 LOG_SWTLB("%s: good TLB!\n", __func__
);
1060 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__
, prot2
);
1063 if (as
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
1064 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1069 if ((!rw
&& prot2
& PAGE_READ
) || (rw
&& (prot2
& PAGE_WRITE
))) {
1070 LOG_SWTLB("%s: found TLB!\n", __func__
);
1074 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__
, prot2
);
1081 static int mmubooke206_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1082 target_ulong address
, int rw
,
1083 int access_type
, int mmu_idx
)
1090 raddr
= (hwaddr
)-1ULL;
1092 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1093 int ways
= booke206_tlb_ways(env
, i
);
1095 for (j
= 0; j
< ways
; j
++) {
1096 tlb
= booke206_get_tlbm(env
, i
, address
, j
);
1100 ret
= mmubooke206_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
,
1101 rw
, access_type
, mmu_idx
);
1112 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1113 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1116 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1117 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1123 static const char *book3e_tsize_to_str
[32] = {
1124 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
1125 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
1126 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
1130 static void mmubooke_dump_mmu(CPUPPCState
*env
)
1132 ppcemb_tlb_t
*entry
;
1135 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
1136 qemu_printf("Cannot access KVM TLB\n");
1140 qemu_printf("\nTLB:\n");
1141 qemu_printf("Effective Physical Size PID Prot "
1144 entry
= &env
->tlb
.tlbe
[0];
1145 for (i
= 0; i
< env
->nb_tlb
; i
++, entry
++) {
1148 uint64_t size
= (uint64_t)entry
->size
;
1151 /* Check valid flag */
1152 if (!(entry
->prot
& PAGE_VALID
)) {
1156 mask
= ~(entry
->size
- 1);
1157 ea
= entry
->EPN
& mask
;
1158 pa
= entry
->RPN
& mask
;
1159 /* Extend the physical address to 36 bits */
1160 pa
|= (hwaddr
)(entry
->RPN
& 0xF) << 32;
1161 if (size
>= 1 * MiB
) {
1162 snprintf(size_buf
, sizeof(size_buf
), "%3" PRId64
"M", size
/ MiB
);
1164 snprintf(size_buf
, sizeof(size_buf
), "%3" PRId64
"k", size
/ KiB
);
1166 qemu_printf("0x%016" PRIx64
" 0x%016" PRIx64
" %s %-5u %08x %08x\n",
1167 (uint64_t)ea
, (uint64_t)pa
, size_buf
, (uint32_t)entry
->PID
,
1168 entry
->prot
, entry
->attr
);
1173 static void mmubooke206_dump_one_tlb(CPUPPCState
*env
, int tlbn
, int offset
,
1176 ppcmas_tlb_t
*entry
;
1179 qemu_printf("\nTLB%d:\n", tlbn
);
1180 qemu_printf("Effective Physical Size TID TS SRWX"
1181 " URWX WIMGE U0123\n");
1183 entry
= &env
->tlb
.tlbm
[offset
];
1184 for (i
= 0; i
< tlbsize
; i
++, entry
++) {
1185 hwaddr ea
, pa
, size
;
1188 if (!(entry
->mas1
& MAS1_VALID
)) {
1192 tsize
= (entry
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
1193 size
= 1024ULL << tsize
;
1194 ea
= entry
->mas2
& ~(size
- 1);
1195 pa
= entry
->mas7_3
& ~(size
- 1);
1197 qemu_printf("0x%016" PRIx64
" 0x%016" PRIx64
" %4s %-5u %1u S%c%c%c"
1198 "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
1199 (uint64_t)ea
, (uint64_t)pa
,
1200 book3e_tsize_to_str
[tsize
],
1201 (entry
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
,
1202 (entry
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
,
1203 entry
->mas7_3
& MAS3_SR
? 'R' : '-',
1204 entry
->mas7_3
& MAS3_SW
? 'W' : '-',
1205 entry
->mas7_3
& MAS3_SX
? 'X' : '-',
1206 entry
->mas7_3
& MAS3_UR
? 'R' : '-',
1207 entry
->mas7_3
& MAS3_UW
? 'W' : '-',
1208 entry
->mas7_3
& MAS3_UX
? 'X' : '-',
1209 entry
->mas2
& MAS2_W
? 'W' : '-',
1210 entry
->mas2
& MAS2_I
? 'I' : '-',
1211 entry
->mas2
& MAS2_M
? 'M' : '-',
1212 entry
->mas2
& MAS2_G
? 'G' : '-',
1213 entry
->mas2
& MAS2_E
? 'E' : '-',
1214 entry
->mas7_3
& MAS3_U0
? '0' : '-',
1215 entry
->mas7_3
& MAS3_U1
? '1' : '-',
1216 entry
->mas7_3
& MAS3_U2
? '2' : '-',
1217 entry
->mas7_3
& MAS3_U3
? '3' : '-');
1221 static void mmubooke206_dump_mmu(CPUPPCState
*env
)
1226 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
1227 qemu_printf("Cannot access KVM TLB\n");
1231 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1232 int size
= booke206_tlb_size(env
, i
);
1238 mmubooke206_dump_one_tlb(env
, i
, offset
, size
);
1243 static void mmu6xx_dump_BATs(CPUPPCState
*env
, int type
)
1245 target_ulong
*BATlt
, *BATut
, *BATu
, *BATl
;
1246 target_ulong BEPIl
, BEPIu
, bl
;
1251 BATlt
= env
->IBAT
[1];
1252 BATut
= env
->IBAT
[0];
1255 BATlt
= env
->DBAT
[1];
1256 BATut
= env
->DBAT
[0];
1260 for (i
= 0; i
< env
->nb_BATs
; i
++) {
1263 BEPIu
= *BATu
& 0xF0000000;
1264 BEPIl
= *BATu
& 0x0FFE0000;
1265 bl
= (*BATu
& 0x00001FFC) << 15;
1266 qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
1267 " BATl " TARGET_FMT_lx
"\n\t" TARGET_FMT_lx
" "
1268 TARGET_FMT_lx
" " TARGET_FMT_lx
"\n",
1269 type
== ACCESS_CODE
? "code" : "data", i
,
1270 *BATu
, *BATl
, BEPIu
, BEPIl
, bl
);
1274 static void mmu6xx_dump_mmu(CPUPPCState
*env
)
1276 PowerPCCPU
*cpu
= env_archcpu(env
);
1279 int type
, way
, entry
, i
;
1281 qemu_printf("HTAB base = 0x%"HWADDR_PRIx
"\n", ppc_hash32_hpt_base(cpu
));
1282 qemu_printf("HTAB mask = 0x%"HWADDR_PRIx
"\n", ppc_hash32_hpt_mask(cpu
));
1284 qemu_printf("\nSegment registers:\n");
1285 for (i
= 0; i
< 32; i
++) {
1287 if (sr
& 0x80000000) {
1288 qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
1289 "CNTLR_SPEC=0x%05x\n", i
,
1290 sr
& 0x80000000 ? 1 : 0, sr
& 0x40000000 ? 1 : 0,
1291 sr
& 0x20000000 ? 1 : 0, (uint32_t)((sr
>> 20) & 0x1FF),
1292 (uint32_t)(sr
& 0xFFFFF));
1294 qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i
,
1295 sr
& 0x80000000 ? 1 : 0, sr
& 0x40000000 ? 1 : 0,
1296 sr
& 0x20000000 ? 1 : 0, sr
& 0x10000000 ? 1 : 0,
1297 (uint32_t)(sr
& 0x00FFFFFF));
1301 qemu_printf("\nBATs:\n");
1302 mmu6xx_dump_BATs(env
, ACCESS_INT
);
1303 mmu6xx_dump_BATs(env
, ACCESS_CODE
);
1305 if (env
->id_tlbs
!= 1) {
1306 qemu_printf("ERROR: 6xx MMU should have separated TLB"
1307 " for code and data\n");
1310 qemu_printf("\nTLBs [EPN EPN + SIZE]\n");
1312 for (type
= 0; type
< 2; type
++) {
1313 for (way
= 0; way
< env
->nb_ways
; way
++) {
1314 for (entry
= env
->nb_tlb
* type
+ env
->tlb_per_way
* way
;
1315 entry
< (env
->nb_tlb
* type
+ env
->tlb_per_way
* (way
+ 1));
1318 tlb
= &env
->tlb
.tlb6
[entry
];
1319 qemu_printf("%s TLB %02d/%02d way:%d %s ["
1320 TARGET_FMT_lx
" " TARGET_FMT_lx
"]\n",
1321 type
? "code" : "data", entry
% env
->nb_tlb
,
1323 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
1324 tlb
->EPN
, tlb
->EPN
+ TARGET_PAGE_SIZE
);
1330 void dump_mmu(CPUPPCState
*env
)
1332 switch (env
->mmu_model
) {
1333 case POWERPC_MMU_BOOKE
:
1334 mmubooke_dump_mmu(env
);
1336 case POWERPC_MMU_BOOKE206
:
1337 mmubooke206_dump_mmu(env
);
1339 case POWERPC_MMU_SOFT_6xx
:
1340 case POWERPC_MMU_SOFT_74xx
:
1341 mmu6xx_dump_mmu(env
);
1343 #if defined(TARGET_PPC64)
1344 case POWERPC_MMU_64B
:
1345 case POWERPC_MMU_2_03
:
1346 case POWERPC_MMU_2_06
:
1347 case POWERPC_MMU_2_07
:
1348 dump_slb(env_archcpu(env
));
1350 case POWERPC_MMU_3_00
:
1351 if (ppc64_v3_radix(env_archcpu(env
))) {
1352 qemu_log_mask(LOG_UNIMP
, "%s: the PPC64 MMU is unsupported\n",
1355 dump_slb(env_archcpu(env
));
1360 qemu_log_mask(LOG_UNIMP
, "%s: unimplemented\n", __func__
);
1364 static inline int check_physical(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1365 target_ulong eaddr
, int rw
)
1370 ctx
->prot
= PAGE_READ
| PAGE_EXEC
;
1372 switch (env
->mmu_model
) {
1373 case POWERPC_MMU_SOFT_6xx
:
1374 case POWERPC_MMU_SOFT_74xx
:
1375 case POWERPC_MMU_SOFT_4xx
:
1376 case POWERPC_MMU_REAL
:
1377 case POWERPC_MMU_BOOKE
:
1378 ctx
->prot
|= PAGE_WRITE
;
1381 case POWERPC_MMU_SOFT_4xx_Z
:
1382 if (unlikely(msr_pe
!= 0)) {
1384 * 403 family add some particular protections, using
1385 * PBL/PBU registers for accesses with no translation.
1388 /* Check PLB validity */
1389 (env
->pb
[0] < env
->pb
[1] &&
1390 /* and address in plb area */
1391 eaddr
>= env
->pb
[0] && eaddr
< env
->pb
[1]) ||
1392 (env
->pb
[2] < env
->pb
[3] &&
1393 eaddr
>= env
->pb
[2] && eaddr
< env
->pb
[3]) ? 1 : 0;
1394 if (in_plb
^ msr_px
) {
1395 /* Access in protected area */
1397 /* Access is not allowed */
1401 /* Read-write access is allowed */
1402 ctx
->prot
|= PAGE_WRITE
;
1408 /* Caller's checks mean we should never get here for other models */
1416 static int get_physical_address_wtlb(
1417 CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1418 target_ulong eaddr
, int rw
, int access_type
,
1422 bool real_mode
= (access_type
== ACCESS_CODE
&& msr_ir
== 0)
1423 || (access_type
!= ACCESS_CODE
&& msr_dr
== 0);
1425 switch (env
->mmu_model
) {
1426 case POWERPC_MMU_SOFT_6xx
:
1427 case POWERPC_MMU_SOFT_74xx
:
1429 ret
= check_physical(env
, ctx
, eaddr
, rw
);
1431 /* Try to find a BAT */
1432 if (env
->nb_BATs
!= 0) {
1433 ret
= get_bat_6xx_tlb(env
, ctx
, eaddr
, rw
, access_type
);
1436 /* We didn't match any BAT entry or don't have BATs */
1437 ret
= get_segment_6xx_tlb(env
, ctx
, eaddr
, rw
, access_type
);
1442 case POWERPC_MMU_SOFT_4xx
:
1443 case POWERPC_MMU_SOFT_4xx_Z
:
1445 ret
= check_physical(env
, ctx
, eaddr
, rw
);
1447 ret
= mmu40x_get_physical_address(env
, ctx
, eaddr
,
1451 case POWERPC_MMU_BOOKE
:
1452 ret
= mmubooke_get_physical_address(env
, ctx
, eaddr
,
1455 case POWERPC_MMU_BOOKE206
:
1456 ret
= mmubooke206_get_physical_address(env
, ctx
, eaddr
, rw
,
1457 access_type
, mmu_idx
);
1459 case POWERPC_MMU_MPC8xx
:
1461 cpu_abort(env_cpu(env
), "MPC8xx MMU model is not implemented\n");
1463 case POWERPC_MMU_REAL
:
1465 ret
= check_physical(env
, ctx
, eaddr
, rw
);
1467 cpu_abort(env_cpu(env
),
1468 "PowerPC in real mode do not do any translation\n");
1472 cpu_abort(env_cpu(env
), "Unknown or invalid MMU model\n");
1479 static int get_physical_address(
1480 CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1481 target_ulong eaddr
, int rw
, int access_type
)
1483 return get_physical_address_wtlb(env
, ctx
, eaddr
, rw
, access_type
, 0);
1486 hwaddr
ppc_cpu_get_phys_page_debug(CPUState
*cs
, vaddr addr
)
1488 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
1489 CPUPPCState
*env
= &cpu
->env
;
1492 switch (env
->mmu_model
) {
1493 #if defined(TARGET_PPC64)
1494 case POWERPC_MMU_64B
:
1495 case POWERPC_MMU_2_03
:
1496 case POWERPC_MMU_2_06
:
1497 case POWERPC_MMU_2_07
:
1498 return ppc_hash64_get_phys_page_debug(cpu
, addr
);
1499 case POWERPC_MMU_3_00
:
1500 return ppc64_v3_get_phys_page_debug(cpu
, addr
);
1503 case POWERPC_MMU_32B
:
1504 case POWERPC_MMU_601
:
1505 return ppc_hash32_get_phys_page_debug(cpu
, addr
);
1511 if (unlikely(get_physical_address(env
, &ctx
, addr
, 0, ACCESS_INT
) != 0)) {
1514 * Some MMUs have separate TLBs for code and data. If we only
1515 * try an ACCESS_INT, we may not be able to read instructions
1516 * mapped by code TLBs, so we also try a ACCESS_CODE.
1518 if (unlikely(get_physical_address(env
, &ctx
, addr
, 0,
1519 ACCESS_CODE
) != 0)) {
1524 return ctx
.raddr
& TARGET_PAGE_MASK
;
1527 static void booke206_update_mas_tlb_miss(CPUPPCState
*env
, target_ulong address
,
1528 int rw
, int mmu_idx
)
1532 uint32_t missed_tid
= 0;
1533 bool use_epid
= mmubooke206_get_as(env
, mmu_idx
, &epid
, &as
, &pr
);
1537 env
->spr
[SPR_BOOKE_MAS0
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TLBSELD_MASK
;
1538 env
->spr
[SPR_BOOKE_MAS1
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TSIZED_MASK
;
1539 env
->spr
[SPR_BOOKE_MAS2
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_WIMGED_MASK
;
1540 env
->spr
[SPR_BOOKE_MAS3
] = 0;
1541 env
->spr
[SPR_BOOKE_MAS6
] = 0;
1542 env
->spr
[SPR_BOOKE_MAS7
] = 0;
1546 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_TS
;
1547 env
->spr
[SPR_BOOKE_MAS6
] |= MAS6_SAS
;
1550 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_VALID
;
1551 env
->spr
[SPR_BOOKE_MAS2
] |= address
& MAS2_EPN_MASK
;
1554 switch (env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TIDSELD_PIDZ
) {
1555 case MAS4_TIDSELD_PID0
:
1556 missed_tid
= env
->spr
[SPR_BOOKE_PID
];
1558 case MAS4_TIDSELD_PID1
:
1559 missed_tid
= env
->spr
[SPR_BOOKE_PID1
];
1561 case MAS4_TIDSELD_PID2
:
1562 missed_tid
= env
->spr
[SPR_BOOKE_PID2
];
1565 env
->spr
[SPR_BOOKE_MAS6
] |= env
->spr
[SPR_BOOKE_PID
] << 16;
1568 env
->spr
[SPR_BOOKE_MAS6
] |= missed_tid
<< 16;
1570 env
->spr
[SPR_BOOKE_MAS1
] |= (missed_tid
<< MAS1_TID_SHIFT
);
1573 /* next victim logic */
1574 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_ESEL_SHIFT
;
1576 env
->last_way
&= booke206_tlb_ways(env
, 0) - 1;
1577 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
1580 /* Perform address translation */
1581 static int cpu_ppc_handle_mmu_fault(CPUPPCState
*env
, target_ulong address
,
1582 int rw
, int mmu_idx
)
1584 CPUState
*cs
= env_cpu(env
);
1585 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
1593 access_type
= ACCESS_CODE
;
1596 access_type
= env
->access_type
;
1598 ret
= get_physical_address_wtlb(env
, &ctx
, address
, rw
,
1599 access_type
, mmu_idx
);
1601 tlb_set_page(cs
, address
& TARGET_PAGE_MASK
,
1602 ctx
.raddr
& TARGET_PAGE_MASK
, ctx
.prot
,
1603 mmu_idx
, TARGET_PAGE_SIZE
);
1605 } else if (ret
< 0) {
1607 if (access_type
== ACCESS_CODE
) {
1610 /* No matches in page tables or TLB */
1611 switch (env
->mmu_model
) {
1612 case POWERPC_MMU_SOFT_6xx
:
1613 cs
->exception_index
= POWERPC_EXCP_IFTLB
;
1614 env
->error_code
= 1 << 18;
1615 env
->spr
[SPR_IMISS
] = address
;
1616 env
->spr
[SPR_ICMP
] = 0x80000000 | ctx
.ptem
;
1618 case POWERPC_MMU_SOFT_74xx
:
1619 cs
->exception_index
= POWERPC_EXCP_IFTLB
;
1621 case POWERPC_MMU_SOFT_4xx
:
1622 case POWERPC_MMU_SOFT_4xx_Z
:
1623 cs
->exception_index
= POWERPC_EXCP_ITLB
;
1624 env
->error_code
= 0;
1625 env
->spr
[SPR_40x_DEAR
] = address
;
1626 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1628 case POWERPC_MMU_BOOKE206
:
1629 booke206_update_mas_tlb_miss(env
, address
, 2, mmu_idx
);
1631 case POWERPC_MMU_BOOKE
:
1632 cs
->exception_index
= POWERPC_EXCP_ITLB
;
1633 env
->error_code
= 0;
1634 env
->spr
[SPR_BOOKE_DEAR
] = address
;
1635 env
->spr
[SPR_BOOKE_ESR
] = mmubooke206_esr(mmu_idx
, 0);
1637 case POWERPC_MMU_MPC8xx
:
1639 cpu_abort(cs
, "MPC8xx MMU model is not implemented\n");
1641 case POWERPC_MMU_REAL
:
1642 cpu_abort(cs
, "PowerPC in real mode should never raise "
1643 "any MMU exceptions\n");
1646 cpu_abort(cs
, "Unknown or invalid MMU model\n");
1651 /* Access rights violation */
1652 cs
->exception_index
= POWERPC_EXCP_ISI
;
1653 env
->error_code
= 0x08000000;
1656 /* No execute protection violation */
1657 if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1658 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1659 env
->spr
[SPR_BOOKE_ESR
] = 0x00000000;
1661 cs
->exception_index
= POWERPC_EXCP_ISI
;
1662 env
->error_code
= 0x10000000;
1665 /* Direct store exception */
1666 /* No code fetch is allowed in direct-store areas */
1667 cs
->exception_index
= POWERPC_EXCP_ISI
;
1668 env
->error_code
= 0x10000000;
1674 /* No matches in page tables or TLB */
1675 switch (env
->mmu_model
) {
1676 case POWERPC_MMU_SOFT_6xx
:
1678 cs
->exception_index
= POWERPC_EXCP_DSTLB
;
1679 env
->error_code
= 1 << 16;
1681 cs
->exception_index
= POWERPC_EXCP_DLTLB
;
1682 env
->error_code
= 0;
1684 env
->spr
[SPR_DMISS
] = address
;
1685 env
->spr
[SPR_DCMP
] = 0x80000000 | ctx
.ptem
;
1687 env
->error_code
|= ctx
.key
<< 19;
1688 env
->spr
[SPR_HASH1
] = ppc_hash32_hpt_base(cpu
) +
1689 get_pteg_offset32(cpu
, ctx
.hash
[0]);
1690 env
->spr
[SPR_HASH2
] = ppc_hash32_hpt_base(cpu
) +
1691 get_pteg_offset32(cpu
, ctx
.hash
[1]);
1693 case POWERPC_MMU_SOFT_74xx
:
1695 cs
->exception_index
= POWERPC_EXCP_DSTLB
;
1697 cs
->exception_index
= POWERPC_EXCP_DLTLB
;
1700 /* Implement LRU algorithm */
1701 env
->error_code
= ctx
.key
<< 19;
1702 env
->spr
[SPR_TLBMISS
] = (address
& ~((target_ulong
)0x3)) |
1703 ((env
->last_way
+ 1) & (env
->nb_ways
- 1));
1704 env
->spr
[SPR_PTEHI
] = 0x80000000 | ctx
.ptem
;
1706 case POWERPC_MMU_SOFT_4xx
:
1707 case POWERPC_MMU_SOFT_4xx_Z
:
1708 cs
->exception_index
= POWERPC_EXCP_DTLB
;
1709 env
->error_code
= 0;
1710 env
->spr
[SPR_40x_DEAR
] = address
;
1712 env
->spr
[SPR_40x_ESR
] = 0x00800000;
1714 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1717 case POWERPC_MMU_MPC8xx
:
1719 cpu_abort(cs
, "MPC8xx MMU model is not implemented\n");
1721 case POWERPC_MMU_BOOKE206
:
1722 booke206_update_mas_tlb_miss(env
, address
, rw
, mmu_idx
);
1724 case POWERPC_MMU_BOOKE
:
1725 cs
->exception_index
= POWERPC_EXCP_DTLB
;
1726 env
->error_code
= 0;
1727 env
->spr
[SPR_BOOKE_DEAR
] = address
;
1728 env
->spr
[SPR_BOOKE_ESR
] = mmubooke206_esr(mmu_idx
, rw
);
1730 case POWERPC_MMU_REAL
:
1731 cpu_abort(cs
, "PowerPC in real mode should never raise "
1732 "any MMU exceptions\n");
1735 cpu_abort(cs
, "Unknown or invalid MMU model\n");
1740 /* Access rights violation */
1741 cs
->exception_index
= POWERPC_EXCP_DSI
;
1742 env
->error_code
= 0;
1743 if (env
->mmu_model
== POWERPC_MMU_SOFT_4xx
1744 || env
->mmu_model
== POWERPC_MMU_SOFT_4xx_Z
) {
1745 env
->spr
[SPR_40x_DEAR
] = address
;
1747 env
->spr
[SPR_40x_ESR
] |= 0x00800000;
1749 } else if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1750 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1751 env
->spr
[SPR_BOOKE_DEAR
] = address
;
1752 env
->spr
[SPR_BOOKE_ESR
] = mmubooke206_esr(mmu_idx
, rw
);
1754 env
->spr
[SPR_DAR
] = address
;
1756 env
->spr
[SPR_DSISR
] = 0x0A000000;
1758 env
->spr
[SPR_DSISR
] = 0x08000000;
1763 /* Direct store exception */
1764 switch (access_type
) {
1766 /* Floating point load/store */
1767 cs
->exception_index
= POWERPC_EXCP_ALIGN
;
1768 env
->error_code
= POWERPC_EXCP_ALIGN_FP
;
1769 env
->spr
[SPR_DAR
] = address
;
1772 /* lwarx, ldarx or stwcx. */
1773 cs
->exception_index
= POWERPC_EXCP_DSI
;
1774 env
->error_code
= 0;
1775 env
->spr
[SPR_DAR
] = address
;
1777 env
->spr
[SPR_DSISR
] = 0x06000000;
1779 env
->spr
[SPR_DSISR
] = 0x04000000;
1783 /* eciwx or ecowx */
1784 cs
->exception_index
= POWERPC_EXCP_DSI
;
1785 env
->error_code
= 0;
1786 env
->spr
[SPR_DAR
] = address
;
1788 env
->spr
[SPR_DSISR
] = 0x06100000;
1790 env
->spr
[SPR_DSISR
] = 0x04100000;
1794 printf("DSI: invalid exception (%d)\n", ret
);
1795 cs
->exception_index
= POWERPC_EXCP_PROGRAM
;
1797 POWERPC_EXCP_INVAL
| POWERPC_EXCP_INVAL_INVAL
;
1798 env
->spr
[SPR_DAR
] = address
;
1810 /*****************************************************************************/
1811 /* BATs management */
1812 #if !defined(FLUSH_ALL_TLBS)
1813 static inline void do_invalidate_BAT(CPUPPCState
*env
, target_ulong BATu
,
1816 CPUState
*cs
= env_cpu(env
);
1817 target_ulong base
, end
, page
;
1819 base
= BATu
& ~0x0001FFFF;
1820 end
= base
+ mask
+ 0x00020000;
1821 if (((end
- base
) >> TARGET_PAGE_BITS
) > 1024) {
1822 /* Flushing 1024 4K pages is slower than a complete flush */
1823 LOG_BATS("Flush all BATs\n");
1825 LOG_BATS("Flush done\n");
1828 LOG_BATS("Flush BAT from " TARGET_FMT_lx
" to " TARGET_FMT_lx
" ("
1829 TARGET_FMT_lx
")\n", base
, end
, mask
);
1830 for (page
= base
; page
!= end
; page
+= TARGET_PAGE_SIZE
) {
1831 tlb_flush_page(cs
, page
);
1833 LOG_BATS("Flush done\n");
1837 static inline void dump_store_bat(CPUPPCState
*env
, char ID
, int ul
, int nr
,
1840 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx
" (" TARGET_FMT_lx
")\n", ID
,
1841 nr
, ul
== 0 ? 'u' : 'l', value
, env
->nip
);
1844 void helper_store_ibatu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1847 #if defined(FLUSH_ALL_TLBS)
1848 PowerPCCPU
*cpu
= env_archcpu(env
);
1851 dump_store_bat(env
, 'I', 0, nr
, value
);
1852 if (env
->IBAT
[0][nr
] != value
) {
1853 mask
= (value
<< 15) & 0x0FFE0000UL
;
1854 #if !defined(FLUSH_ALL_TLBS)
1855 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1858 * When storing valid upper BAT, mask BEPI and BRPN and
1859 * invalidate all TLBs covered by this BAT
1861 mask
= (value
<< 15) & 0x0FFE0000UL
;
1862 env
->IBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
1863 (value
& ~0x0001FFFFUL
& ~mask
);
1864 env
->IBAT
[1][nr
] = (env
->IBAT
[1][nr
] & 0x0000007B) |
1865 (env
->IBAT
[1][nr
] & ~0x0001FFFF & ~mask
);
1866 #if !defined(FLUSH_ALL_TLBS)
1867 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1869 tlb_flush(env_cpu(env
));
1874 void helper_store_ibatl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1876 dump_store_bat(env
, 'I', 1, nr
, value
);
1877 env
->IBAT
[1][nr
] = value
;
1880 void helper_store_dbatu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1883 #if defined(FLUSH_ALL_TLBS)
1884 PowerPCCPU
*cpu
= env_archcpu(env
);
1887 dump_store_bat(env
, 'D', 0, nr
, value
);
1888 if (env
->DBAT
[0][nr
] != value
) {
1890 * When storing valid upper BAT, mask BEPI and BRPN and
1891 * invalidate all TLBs covered by this BAT
1893 mask
= (value
<< 15) & 0x0FFE0000UL
;
1894 #if !defined(FLUSH_ALL_TLBS)
1895 do_invalidate_BAT(env
, env
->DBAT
[0][nr
], mask
);
1897 mask
= (value
<< 15) & 0x0FFE0000UL
;
1898 env
->DBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
1899 (value
& ~0x0001FFFFUL
& ~mask
);
1900 env
->DBAT
[1][nr
] = (env
->DBAT
[1][nr
] & 0x0000007B) |
1901 (env
->DBAT
[1][nr
] & ~0x0001FFFF & ~mask
);
1902 #if !defined(FLUSH_ALL_TLBS)
1903 do_invalidate_BAT(env
, env
->DBAT
[0][nr
], mask
);
1905 tlb_flush(env_cpu(env
));
1910 void helper_store_dbatl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1912 dump_store_bat(env
, 'D', 1, nr
, value
);
1913 env
->DBAT
[1][nr
] = value
;
1916 void helper_store_601_batu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1919 #if defined(FLUSH_ALL_TLBS)
1920 PowerPCCPU
*cpu
= env_archcpu(env
);
1924 dump_store_bat(env
, 'I', 0, nr
, value
);
1925 if (env
->IBAT
[0][nr
] != value
) {
1926 #if defined(FLUSH_ALL_TLBS)
1929 mask
= (env
->IBAT
[1][nr
] << 17) & 0x0FFE0000UL
;
1930 if (env
->IBAT
[1][nr
] & 0x40) {
1931 /* Invalidate BAT only if it is valid */
1932 #if !defined(FLUSH_ALL_TLBS)
1933 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1939 * When storing valid upper BAT, mask BEPI and BRPN and
1940 * invalidate all TLBs covered by this BAT
1942 env
->IBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
1943 (value
& ~0x0001FFFFUL
& ~mask
);
1944 env
->DBAT
[0][nr
] = env
->IBAT
[0][nr
];
1945 if (env
->IBAT
[1][nr
] & 0x40) {
1946 #if !defined(FLUSH_ALL_TLBS)
1947 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1952 #if defined(FLUSH_ALL_TLBS)
1954 tlb_flush(env_cpu(env
));
1960 void helper_store_601_batl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1962 #if !defined(FLUSH_ALL_TLBS)
1965 PowerPCCPU
*cpu
= env_archcpu(env
);
1969 dump_store_bat(env
, 'I', 1, nr
, value
);
1970 if (env
->IBAT
[1][nr
] != value
) {
1971 #if defined(FLUSH_ALL_TLBS)
1974 if (env
->IBAT
[1][nr
] & 0x40) {
1975 #if !defined(FLUSH_ALL_TLBS)
1976 mask
= (env
->IBAT
[1][nr
] << 17) & 0x0FFE0000UL
;
1977 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1983 #if !defined(FLUSH_ALL_TLBS)
1984 mask
= (value
<< 17) & 0x0FFE0000UL
;
1985 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1990 env
->IBAT
[1][nr
] = value
;
1991 env
->DBAT
[1][nr
] = value
;
1992 #if defined(FLUSH_ALL_TLBS)
1994 tlb_flush(env_cpu(env
));
2000 /*****************************************************************************/
2001 /* TLB management */
2002 void ppc_tlb_invalidate_all(CPUPPCState
*env
)
2004 #if defined(TARGET_PPC64)
2005 if (mmu_is_64bit(env
->mmu_model
)) {
2006 env
->tlb_need_flush
= 0;
2007 tlb_flush(env_cpu(env
));
2009 #endif /* defined(TARGET_PPC64) */
2010 switch (env
->mmu_model
) {
2011 case POWERPC_MMU_SOFT_6xx
:
2012 case POWERPC_MMU_SOFT_74xx
:
2013 ppc6xx_tlb_invalidate_all(env
);
2015 case POWERPC_MMU_SOFT_4xx
:
2016 case POWERPC_MMU_SOFT_4xx_Z
:
2017 ppc4xx_tlb_invalidate_all(env
);
2019 case POWERPC_MMU_REAL
:
2020 cpu_abort(env_cpu(env
), "No TLB for PowerPC 4xx in real mode\n");
2022 case POWERPC_MMU_MPC8xx
:
2024 cpu_abort(env_cpu(env
), "MPC8xx MMU model is not implemented\n");
2026 case POWERPC_MMU_BOOKE
:
2027 tlb_flush(env_cpu(env
));
2029 case POWERPC_MMU_BOOKE206
:
2030 booke206_flush_tlb(env
, -1, 0);
2032 case POWERPC_MMU_32B
:
2033 case POWERPC_MMU_601
:
2034 env
->tlb_need_flush
= 0;
2035 tlb_flush(env_cpu(env
));
2039 cpu_abort(env_cpu(env
), "Unknown MMU model %x\n", env
->mmu_model
);
2044 void ppc_tlb_invalidate_one(CPUPPCState
*env
, target_ulong addr
)
2046 #if !defined(FLUSH_ALL_TLBS)
2047 addr
&= TARGET_PAGE_MASK
;
2048 #if defined(TARGET_PPC64)
2049 if (mmu_is_64bit(env
->mmu_model
)) {
2050 /* tlbie invalidate TLBs for all segments */
2052 * XXX: given the fact that there are too many segments to invalidate,
2053 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
2054 * we just invalidate all TLBs
2056 env
->tlb_need_flush
|= TLB_NEED_LOCAL_FLUSH
;
2058 #endif /* defined(TARGET_PPC64) */
2059 switch (env
->mmu_model
) {
2060 case POWERPC_MMU_SOFT_6xx
:
2061 case POWERPC_MMU_SOFT_74xx
:
2062 ppc6xx_tlb_invalidate_virt(env
, addr
, 0);
2063 if (env
->id_tlbs
== 1) {
2064 ppc6xx_tlb_invalidate_virt(env
, addr
, 1);
2067 case POWERPC_MMU_32B
:
2068 case POWERPC_MMU_601
:
2070 * Actual CPUs invalidate entire congruence classes based on
2071 * the geometry of their TLBs and some OSes take that into
2072 * account, we just mark the TLB to be flushed later (context
2073 * synchronizing event or sync instruction on 32-bit).
2075 env
->tlb_need_flush
|= TLB_NEED_LOCAL_FLUSH
;
2078 /* Should never reach here with other MMU models */
2082 ppc_tlb_invalidate_all(env
);
2086 /*****************************************************************************/
2087 /* Special registers manipulation */
2088 void ppc_store_sdr1(CPUPPCState
*env
, target_ulong value
)
2090 PowerPCCPU
*cpu
= env_archcpu(env
);
2091 qemu_log_mask(CPU_LOG_MMU
, "%s: " TARGET_FMT_lx
"\n", __func__
, value
);
2093 #if defined(TARGET_PPC64)
2094 if (mmu_is_64bit(env
->mmu_model
)) {
2095 target_ulong sdr_mask
= SDR_64_HTABORG
| SDR_64_HTABSIZE
;
2096 target_ulong htabsize
= value
& SDR_64_HTABSIZE
;
2098 if (value
& ~sdr_mask
) {
2099 error_report("Invalid bits 0x"TARGET_FMT_lx
" set in SDR1",
2103 if (htabsize
> 28) {
2104 error_report("Invalid HTABSIZE 0x" TARGET_FMT_lx
" stored in SDR1",
2109 #endif /* defined(TARGET_PPC64) */
2110 /* FIXME: Should check for valid HTABMASK values in 32-bit case */
2111 env
->spr
[SPR_SDR1
] = value
;
2114 #if defined(TARGET_PPC64)
2115 void ppc_store_ptcr(CPUPPCState
*env
, target_ulong value
)
2117 PowerPCCPU
*cpu
= env_archcpu(env
);
2118 target_ulong ptcr_mask
= PTCR_PATB
| PTCR_PATS
;
2119 target_ulong patbsize
= value
& PTCR_PATS
;
2121 qemu_log_mask(CPU_LOG_MMU
, "%s: " TARGET_FMT_lx
"\n", __func__
, value
);
2124 assert(env
->mmu_model
& POWERPC_MMU_3_00
);
2126 if (value
& ~ptcr_mask
) {
2127 error_report("Invalid bits 0x"TARGET_FMT_lx
" set in PTCR",
2128 value
& ~ptcr_mask
);
2132 if (patbsize
> 24) {
2133 error_report("Invalid Partition Table size 0x" TARGET_FMT_lx
2134 " stored in PTCR", patbsize
);
2138 env
->spr
[SPR_PTCR
] = value
;
2141 #endif /* defined(TARGET_PPC64) */
2143 /* Segment registers load and store */
2144 target_ulong
helper_load_sr(CPUPPCState
*env
, target_ulong sr_num
)
2146 #if defined(TARGET_PPC64)
2147 if (mmu_is_64bit(env
->mmu_model
)) {
2152 return env
->sr
[sr_num
];
2155 void helper_store_sr(CPUPPCState
*env
, target_ulong srnum
, target_ulong value
)
2157 qemu_log_mask(CPU_LOG_MMU
,
2158 "%s: reg=%d " TARGET_FMT_lx
" " TARGET_FMT_lx
"\n", __func__
,
2159 (int)srnum
, value
, env
->sr
[srnum
]);
2160 #if defined(TARGET_PPC64)
2161 if (mmu_is_64bit(env
->mmu_model
)) {
2162 PowerPCCPU
*cpu
= env_archcpu(env
);
2163 uint64_t esid
, vsid
;
2166 esid
= ((uint64_t)(srnum
& 0xf) << 28) | SLB_ESID_V
;
2169 vsid
= (value
& 0xfffffff) << 12;
2171 vsid
|= ((value
>> 27) & 0xf) << 8;
2173 ppc_store_slb(cpu
, srnum
, esid
, vsid
);
2176 if (env
->sr
[srnum
] != value
) {
2177 env
->sr
[srnum
] = value
;
2179 * Invalidating 256MB of virtual memory in 4kB pages is way
2180 * longer than flushing the whole TLB.
2182 #if !defined(FLUSH_ALL_TLBS) && 0
2184 target_ulong page
, end
;
2185 /* Invalidate 256 MB of virtual memory */
2186 page
= (16 << 20) * srnum
;
2187 end
= page
+ (16 << 20);
2188 for (; page
!= end
; page
+= TARGET_PAGE_SIZE
) {
2189 tlb_flush_page(env_cpu(env
), page
);
2193 env
->tlb_need_flush
|= TLB_NEED_LOCAL_FLUSH
;
2198 /* TLB management */
2199 void helper_tlbia(CPUPPCState
*env
)
2201 ppc_tlb_invalidate_all(env
);
2204 void helper_tlbie(CPUPPCState
*env
, target_ulong addr
)
2206 ppc_tlb_invalidate_one(env
, addr
);
2209 void helper_tlbiva(CPUPPCState
*env
, target_ulong addr
)
2211 /* tlbiva instruction only exists on BookE */
2212 assert(env
->mmu_model
== POWERPC_MMU_BOOKE
);
2214 cpu_abort(env_cpu(env
), "BookE MMU model is not implemented\n");
2217 /* Software driven TLBs management */
2218 /* PowerPC 602/603 software TLB load instructions helpers */
2219 static void do_6xx_tlb(CPUPPCState
*env
, target_ulong new_EPN
, int is_code
)
2221 target_ulong RPN
, CMP
, EPN
;
2224 RPN
= env
->spr
[SPR_RPA
];
2226 CMP
= env
->spr
[SPR_ICMP
];
2227 EPN
= env
->spr
[SPR_IMISS
];
2229 CMP
= env
->spr
[SPR_DCMP
];
2230 EPN
= env
->spr
[SPR_DMISS
];
2232 way
= (env
->spr
[SPR_SRR1
] >> 17) & 1;
2233 (void)EPN
; /* avoid a compiler warning */
2234 LOG_SWTLB("%s: EPN " TARGET_FMT_lx
" " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
2235 " PTE1 " TARGET_FMT_lx
" way %d\n", __func__
, new_EPN
, EPN
, CMP
,
2237 /* Store this TLB */
2238 ppc6xx_tlb_store(env
, (uint32_t)(new_EPN
& TARGET_PAGE_MASK
),
2239 way
, is_code
, CMP
, RPN
);
2242 void helper_6xx_tlbd(CPUPPCState
*env
, target_ulong EPN
)
2244 do_6xx_tlb(env
, EPN
, 0);
2247 void helper_6xx_tlbi(CPUPPCState
*env
, target_ulong EPN
)
2249 do_6xx_tlb(env
, EPN
, 1);
2252 /* PowerPC 74xx software TLB load instructions helpers */
2253 static void do_74xx_tlb(CPUPPCState
*env
, target_ulong new_EPN
, int is_code
)
2255 target_ulong RPN
, CMP
, EPN
;
2258 RPN
= env
->spr
[SPR_PTELO
];
2259 CMP
= env
->spr
[SPR_PTEHI
];
2260 EPN
= env
->spr
[SPR_TLBMISS
] & ~0x3;
2261 way
= env
->spr
[SPR_TLBMISS
] & 0x3;
2262 (void)EPN
; /* avoid a compiler warning */
2263 LOG_SWTLB("%s: EPN " TARGET_FMT_lx
" " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
2264 " PTE1 " TARGET_FMT_lx
" way %d\n", __func__
, new_EPN
, EPN
, CMP
,
2266 /* Store this TLB */
2267 ppc6xx_tlb_store(env
, (uint32_t)(new_EPN
& TARGET_PAGE_MASK
),
2268 way
, is_code
, CMP
, RPN
);
2271 void helper_74xx_tlbd(CPUPPCState
*env
, target_ulong EPN
)
2273 do_74xx_tlb(env
, EPN
, 0);
2276 void helper_74xx_tlbi(CPUPPCState
*env
, target_ulong EPN
)
2278 do_74xx_tlb(env
, EPN
, 1);
2281 /*****************************************************************************/
2282 /* PowerPC 601 specific instructions (POWER bridge) */
2284 target_ulong
helper_rac(CPUPPCState
*env
, target_ulong addr
)
2288 target_ulong ret
= 0;
2291 * We don't have to generate many instances of this instruction,
2292 * as rac is supervisor only.
2294 * XXX: FIX THIS: Pretend we have no BAT
2296 nb_BATs
= env
->nb_BATs
;
2298 if (get_physical_address(env
, &ctx
, addr
, 0, ACCESS_INT
) == 0) {
2301 env
->nb_BATs
= nb_BATs
;
2305 static inline target_ulong
booke_tlb_to_page_size(int size
)
2307 return 1024 << (2 * size
);
2310 static inline int booke_page_size_to_tlb(target_ulong page_size
)
2314 switch (page_size
) {
2348 #if defined(TARGET_PPC64)
2349 case 0x000100000000ULL
:
2352 case 0x000400000000ULL
:
2355 case 0x001000000000ULL
:
2358 case 0x004000000000ULL
:
2361 case 0x010000000000ULL
:
2373 /* Helpers for 4xx TLB management */
2374 #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
2376 #define PPC4XX_TLBHI_V 0x00000040
2377 #define PPC4XX_TLBHI_E 0x00000020
2378 #define PPC4XX_TLBHI_SIZE_MIN 0
2379 #define PPC4XX_TLBHI_SIZE_MAX 7
2380 #define PPC4XX_TLBHI_SIZE_DEFAULT 1
2381 #define PPC4XX_TLBHI_SIZE_SHIFT 7
2382 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007
2384 #define PPC4XX_TLBLO_EX 0x00000200
2385 #define PPC4XX_TLBLO_WR 0x00000100
2386 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
2387 #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
2389 target_ulong
helper_4xx_tlbre_hi(CPUPPCState
*env
, target_ulong entry
)
2395 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2396 tlb
= &env
->tlb
.tlbe
[entry
];
2398 if (tlb
->prot
& PAGE_VALID
) {
2399 ret
|= PPC4XX_TLBHI_V
;
2401 size
= booke_page_size_to_tlb(tlb
->size
);
2402 if (size
< PPC4XX_TLBHI_SIZE_MIN
|| size
> PPC4XX_TLBHI_SIZE_MAX
) {
2403 size
= PPC4XX_TLBHI_SIZE_DEFAULT
;
2405 ret
|= size
<< PPC4XX_TLBHI_SIZE_SHIFT
;
2406 env
->spr
[SPR_40x_PID
] = tlb
->PID
;
2410 target_ulong
helper_4xx_tlbre_lo(CPUPPCState
*env
, target_ulong entry
)
2415 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2416 tlb
= &env
->tlb
.tlbe
[entry
];
2418 if (tlb
->prot
& PAGE_EXEC
) {
2419 ret
|= PPC4XX_TLBLO_EX
;
2421 if (tlb
->prot
& PAGE_WRITE
) {
2422 ret
|= PPC4XX_TLBLO_WR
;
2427 void helper_4xx_tlbwe_hi(CPUPPCState
*env
, target_ulong entry
,
2430 CPUState
*cs
= env_cpu(env
);
2432 target_ulong page
, end
;
2434 LOG_SWTLB("%s entry %d val " TARGET_FMT_lx
"\n", __func__
, (int)entry
,
2436 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2437 tlb
= &env
->tlb
.tlbe
[entry
];
2438 /* Invalidate previous TLB (if it's valid) */
2439 if (tlb
->prot
& PAGE_VALID
) {
2440 end
= tlb
->EPN
+ tlb
->size
;
2441 LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx
" end "
2442 TARGET_FMT_lx
"\n", __func__
, (int)entry
, tlb
->EPN
, end
);
2443 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
) {
2444 tlb_flush_page(cs
, page
);
2447 tlb
->size
= booke_tlb_to_page_size((val
>> PPC4XX_TLBHI_SIZE_SHIFT
)
2448 & PPC4XX_TLBHI_SIZE_MASK
);
2450 * We cannot handle TLB size < TARGET_PAGE_SIZE.
2451 * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
2453 if ((val
& PPC4XX_TLBHI_V
) && tlb
->size
< TARGET_PAGE_SIZE
) {
2454 cpu_abort(cs
, "TLB size " TARGET_FMT_lu
" < %u "
2455 "are not supported (%d)\n"
2456 "Please implement TARGET_PAGE_BITS_VARY\n",
2457 tlb
->size
, TARGET_PAGE_SIZE
, (int)((val
>> 7) & 0x7));
2459 tlb
->EPN
= val
& ~(tlb
->size
- 1);
2460 if (val
& PPC4XX_TLBHI_V
) {
2461 tlb
->prot
|= PAGE_VALID
;
2462 if (val
& PPC4XX_TLBHI_E
) {
2463 /* XXX: TO BE FIXED */
2465 "Little-endian TLB entries are not supported by now\n");
2468 tlb
->prot
&= ~PAGE_VALID
;
2470 tlb
->PID
= env
->spr
[SPR_40x_PID
]; /* PID */
2471 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx
" EPN " TARGET_FMT_lx
2472 " size " TARGET_FMT_lx
" prot %c%c%c%c PID %d\n", __func__
,
2473 (int)entry
, tlb
->RPN
, tlb
->EPN
, tlb
->size
,
2474 tlb
->prot
& PAGE_READ
? 'r' : '-',
2475 tlb
->prot
& PAGE_WRITE
? 'w' : '-',
2476 tlb
->prot
& PAGE_EXEC
? 'x' : '-',
2477 tlb
->prot
& PAGE_VALID
? 'v' : '-', (int)tlb
->PID
);
2478 /* Invalidate new TLB (if valid) */
2479 if (tlb
->prot
& PAGE_VALID
) {
2480 end
= tlb
->EPN
+ tlb
->size
;
2481 LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx
" end "
2482 TARGET_FMT_lx
"\n", __func__
, (int)entry
, tlb
->EPN
, end
);
2483 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
) {
2484 tlb_flush_page(cs
, page
);
2489 void helper_4xx_tlbwe_lo(CPUPPCState
*env
, target_ulong entry
,
2494 LOG_SWTLB("%s entry %i val " TARGET_FMT_lx
"\n", __func__
, (int)entry
,
2496 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2497 tlb
= &env
->tlb
.tlbe
[entry
];
2498 tlb
->attr
= val
& PPC4XX_TLBLO_ATTR_MASK
;
2499 tlb
->RPN
= val
& PPC4XX_TLBLO_RPN_MASK
;
2500 tlb
->prot
= PAGE_READ
;
2501 if (val
& PPC4XX_TLBLO_EX
) {
2502 tlb
->prot
|= PAGE_EXEC
;
2504 if (val
& PPC4XX_TLBLO_WR
) {
2505 tlb
->prot
|= PAGE_WRITE
;
2507 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx
" EPN " TARGET_FMT_lx
2508 " size " TARGET_FMT_lx
" prot %c%c%c%c PID %d\n", __func__
,
2509 (int)entry
, tlb
->RPN
, tlb
->EPN
, tlb
->size
,
2510 tlb
->prot
& PAGE_READ
? 'r' : '-',
2511 tlb
->prot
& PAGE_WRITE
? 'w' : '-',
2512 tlb
->prot
& PAGE_EXEC
? 'x' : '-',
2513 tlb
->prot
& PAGE_VALID
? 'v' : '-', (int)tlb
->PID
);
2516 target_ulong
helper_4xx_tlbsx(CPUPPCState
*env
, target_ulong address
)
2518 return ppcemb_tlb_search(env
, address
, env
->spr
[SPR_40x_PID
]);
2521 /* PowerPC 440 TLB management */
2522 void helper_440_tlbwe(CPUPPCState
*env
, uint32_t word
, target_ulong entry
,
2526 target_ulong EPN
, RPN
, size
;
2529 LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx
"\n",
2530 __func__
, word
, (int)entry
, value
);
2533 tlb
= &env
->tlb
.tlbe
[entry
];
2536 /* Just here to please gcc */
2538 EPN
= value
& 0xFFFFFC00;
2539 if ((tlb
->prot
& PAGE_VALID
) && EPN
!= tlb
->EPN
) {
2543 size
= booke_tlb_to_page_size((value
>> 4) & 0xF);
2544 if ((tlb
->prot
& PAGE_VALID
) && tlb
->size
< size
) {
2549 tlb
->attr
|= (value
>> 8) & 1;
2550 if (value
& 0x200) {
2551 tlb
->prot
|= PAGE_VALID
;
2553 if (tlb
->prot
& PAGE_VALID
) {
2554 tlb
->prot
&= ~PAGE_VALID
;
2558 tlb
->PID
= env
->spr
[SPR_440_MMUCR
] & 0x000000FF;
2559 if (do_flush_tlbs
) {
2560 tlb_flush(env_cpu(env
));
2564 RPN
= value
& 0xFFFFFC0F;
2565 if ((tlb
->prot
& PAGE_VALID
) && tlb
->RPN
!= RPN
) {
2566 tlb_flush(env_cpu(env
));
2571 tlb
->attr
= (tlb
->attr
& 0x1) | (value
& 0x0000FF00);
2572 tlb
->prot
= tlb
->prot
& PAGE_VALID
;
2574 tlb
->prot
|= PAGE_READ
<< 4;
2577 tlb
->prot
|= PAGE_WRITE
<< 4;
2580 tlb
->prot
|= PAGE_EXEC
<< 4;
2583 tlb
->prot
|= PAGE_READ
;
2586 tlb
->prot
|= PAGE_WRITE
;
2589 tlb
->prot
|= PAGE_EXEC
;
2595 target_ulong
helper_440_tlbre(CPUPPCState
*env
, uint32_t word
,
2603 tlb
= &env
->tlb
.tlbe
[entry
];
2606 /* Just here to please gcc */
2609 size
= booke_page_size_to_tlb(tlb
->size
);
2610 if (size
< 0 || size
> 0xF) {
2614 if (tlb
->attr
& 0x1) {
2617 if (tlb
->prot
& PAGE_VALID
) {
2620 env
->spr
[SPR_440_MMUCR
] &= ~0x000000FF;
2621 env
->spr
[SPR_440_MMUCR
] |= tlb
->PID
;
2627 ret
= tlb
->attr
& ~0x1;
2628 if (tlb
->prot
& (PAGE_READ
<< 4)) {
2631 if (tlb
->prot
& (PAGE_WRITE
<< 4)) {
2634 if (tlb
->prot
& (PAGE_EXEC
<< 4)) {
2637 if (tlb
->prot
& PAGE_READ
) {
2640 if (tlb
->prot
& PAGE_WRITE
) {
2643 if (tlb
->prot
& PAGE_EXEC
) {
2651 target_ulong
helper_440_tlbsx(CPUPPCState
*env
, target_ulong address
)
2653 return ppcemb_tlb_search(env
, address
, env
->spr
[SPR_440_MMUCR
] & 0xFF);
2656 /* PowerPC BookE 2.06 TLB management */
2658 static ppcmas_tlb_t
*booke206_cur_tlb(CPUPPCState
*env
)
2660 uint32_t tlbncfg
= 0;
2661 int esel
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_ESEL_MASK
) >> MAS0_ESEL_SHIFT
;
2662 int ea
= (env
->spr
[SPR_BOOKE_MAS2
] & MAS2_EPN_MASK
);
2665 tlb
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_TLBSEL_MASK
) >> MAS0_TLBSEL_SHIFT
;
2666 tlbncfg
= env
->spr
[SPR_BOOKE_TLB0CFG
+ tlb
];
2668 if ((tlbncfg
& TLBnCFG_HES
) && (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_HES
)) {
2669 cpu_abort(env_cpu(env
), "we don't support HES yet\n");
2672 return booke206_get_tlbm(env
, tlb
, ea
, esel
);
2675 void helper_booke_setpid(CPUPPCState
*env
, uint32_t pidn
, target_ulong pid
)
2677 env
->spr
[pidn
] = pid
;
2678 /* changing PIDs mean we're in a different address space now */
2679 tlb_flush(env_cpu(env
));
2682 void helper_booke_set_eplc(CPUPPCState
*env
, target_ulong val
)
2684 env
->spr
[SPR_BOOKE_EPLC
] = val
& EPID_MASK
;
2685 tlb_flush_by_mmuidx(env_cpu(env
), 1 << PPC_TLB_EPID_LOAD
);
2687 void helper_booke_set_epsc(CPUPPCState
*env
, target_ulong val
)
2689 env
->spr
[SPR_BOOKE_EPSC
] = val
& EPID_MASK
;
2690 tlb_flush_by_mmuidx(env_cpu(env
), 1 << PPC_TLB_EPID_STORE
);
2693 static inline void flush_page(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
)
2695 if (booke206_tlb_to_page_size(env
, tlb
) == TARGET_PAGE_SIZE
) {
2696 tlb_flush_page(env_cpu(env
), tlb
->mas2
& MAS2_EPN_MASK
);
2698 tlb_flush(env_cpu(env
));
2702 void helper_booke206_tlbwe(CPUPPCState
*env
)
2704 uint32_t tlbncfg
, tlbn
;
2706 uint32_t size_tlb
, size_ps
;
2710 switch (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_WQ_MASK
) {
2711 case MAS0_WQ_ALWAYS
:
2712 /* good to go, write that entry */
2715 /* XXX check if reserved */
2720 case MAS0_WQ_CLR_RSRV
:
2721 /* XXX clear entry */
2724 /* no idea what to do */
2728 if (((env
->spr
[SPR_BOOKE_MAS0
] & MAS0_ATSEL
) == MAS0_ATSEL_LRAT
) &&
2730 /* XXX we don't support direct LRAT setting yet */
2731 fprintf(stderr
, "cpu: don't support LRAT setting yet\n");
2735 tlbn
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_TLBSEL_MASK
) >> MAS0_TLBSEL_SHIFT
;
2736 tlbncfg
= env
->spr
[SPR_BOOKE_TLB0CFG
+ tlbn
];
2738 tlb
= booke206_cur_tlb(env
);
2741 raise_exception_err_ra(env
, POWERPC_EXCP_PROGRAM
,
2742 POWERPC_EXCP_INVAL
|
2743 POWERPC_EXCP_INVAL_INVAL
, GETPC());
2746 /* check that we support the targeted size */
2747 size_tlb
= (env
->spr
[SPR_BOOKE_MAS1
] & MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
2748 size_ps
= booke206_tlbnps(env
, tlbn
);
2749 if ((env
->spr
[SPR_BOOKE_MAS1
] & MAS1_VALID
) && (tlbncfg
& TLBnCFG_AVAIL
) &&
2750 !(size_ps
& (1 << size_tlb
))) {
2751 raise_exception_err_ra(env
, POWERPC_EXCP_PROGRAM
,
2752 POWERPC_EXCP_INVAL
|
2753 POWERPC_EXCP_INVAL_INVAL
, GETPC());
2757 cpu_abort(env_cpu(env
), "missing HV implementation\n");
2760 if (tlb
->mas1
& MAS1_VALID
) {
2762 * Invalidate the page in QEMU TLB if it was a valid entry.
2764 * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
2765 * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
2766 * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
2768 * "Note that when an L2 TLB entry is written, it may be displacing an
2769 * already valid entry in the same L2 TLB location (a victim). If a
2770 * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
2771 * TLB entry is automatically invalidated."
2773 flush_page(env
, tlb
);
2776 tlb
->mas7_3
= ((uint64_t)env
->spr
[SPR_BOOKE_MAS7
] << 32) |
2777 env
->spr
[SPR_BOOKE_MAS3
];
2778 tlb
->mas1
= env
->spr
[SPR_BOOKE_MAS1
];
2780 if ((env
->spr
[SPR_MMUCFG
] & MMUCFG_MAVN
) == MMUCFG_MAVN_V2
) {
2781 /* For TLB which has a fixed size TSIZE is ignored with MAV2 */
2782 booke206_fixed_size_tlbn(env
, tlbn
, tlb
);
2784 if (!(tlbncfg
& TLBnCFG_AVAIL
)) {
2785 /* force !AVAIL TLB entries to correct page size */
2786 tlb
->mas1
&= ~MAS1_TSIZE_MASK
;
2787 /* XXX can be configured in MMUCSR0 */
2788 tlb
->mas1
|= (tlbncfg
& TLBnCFG_MINSIZE
) >> 12;
2792 /* Make a mask from TLB size to discard invalid bits in EPN field */
2793 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
2794 /* Add a mask for page attributes */
2795 mask
|= MAS2_ACM
| MAS2_VLE
| MAS2_W
| MAS2_I
| MAS2_M
| MAS2_G
| MAS2_E
;
2799 * Executing a tlbwe instruction in 32-bit mode will set bits
2800 * 0:31 of the TLB EPN field to zero.
2805 tlb
->mas2
= env
->spr
[SPR_BOOKE_MAS2
] & mask
;
2807 if (!(tlbncfg
& TLBnCFG_IPROT
)) {
2808 /* no IPROT supported by TLB */
2809 tlb
->mas1
&= ~MAS1_IPROT
;
2812 flush_page(env
, tlb
);
2815 static inline void booke206_tlb_to_mas(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
)
2817 int tlbn
= booke206_tlbm_to_tlbn(env
, tlb
);
2818 int way
= booke206_tlbm_to_way(env
, tlb
);
2820 env
->spr
[SPR_BOOKE_MAS0
] = tlbn
<< MAS0_TLBSEL_SHIFT
;
2821 env
->spr
[SPR_BOOKE_MAS0
] |= way
<< MAS0_ESEL_SHIFT
;
2822 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
2824 env
->spr
[SPR_BOOKE_MAS1
] = tlb
->mas1
;
2825 env
->spr
[SPR_BOOKE_MAS2
] = tlb
->mas2
;
2826 env
->spr
[SPR_BOOKE_MAS3
] = tlb
->mas7_3
;
2827 env
->spr
[SPR_BOOKE_MAS7
] = tlb
->mas7_3
>> 32;
2830 void helper_booke206_tlbre(CPUPPCState
*env
)
2832 ppcmas_tlb_t
*tlb
= NULL
;
2834 tlb
= booke206_cur_tlb(env
);
2836 env
->spr
[SPR_BOOKE_MAS1
] = 0;
2838 booke206_tlb_to_mas(env
, tlb
);
/*
 * tlbsx: search all TLB arrays for an entry translating @address under
 * the PID and address space selected by MAS6.  On a hit the entry is
 * exposed through the MAS registers; on a miss the MAS registers are
 * preloaded with defaults from MAS4 so a subsequent tlbwe can fill it.
 */
void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    hwaddr raddr;
    uint32_t spid, sas;

    /* Search PID and translation-space come from MAS6 */
    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);

            if (!tlb) {
                continue;
            }

            /* nonzero return means no match for this entry */
            if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }

            /* Address space (TS) must match the searched space */
            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }

            /* Hit: publish the entry via MAS0..MAS7 and stop searching */
            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    /* The searched PID becomes the TID of the would-be new entry */
    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
/*
 * Invalidate any entry in TLB array @tlbn that translates effective
 * address @ea, except entries protected by IPROT.
 * NOTE(review): the second parameter's declaration line is not visible in
 * this chunk; presumably it is the effective address (vaddr/uint32_t) —
 * confirm against the full file.
 */
static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              vaddr ea)
{
    int i;
    int ways = booke206_tlb_ways(env, tlbn);
    int mask;

    for (i = 0; i < ways; i++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
        if (!tlb) {
            continue;
        }
        /* Compare EPNs at the granularity of the entry's own page size */
        mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
            !(tlb->mas1 & MAS1_IPROT)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}
/*
 * tlbivax: invalidate TLB entries by effective address.  Bit 0x4 of the
 * EA requests a flash (whole-array) invalidate; bit 0x8 selects TLB1
 * over TLB0.  QEMU's own TLB is flushed to match.
 */
void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
    CPUState *cs;

    if (address & 0x4) {
        /* flush all entries */
        if (address & 0x8) {
            /* flush all of TLB1 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            /* flush all of TLB0 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    if (address & 0x8) {
        /* flush TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        /* TLB1 pages may be any size, so flush everything */
        CPU_FOREACH(cs) {
            tlb_flush(cs);
        }
    } else {
        /* flush TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        CPU_FOREACH(cs) {
            tlb_flush_page(cs, address & MAS2_EPN_MASK);
        }
    }
}
/* tlbilx T=0: flash-invalidate all non-protected entries in every array. */
void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
{
    /* XXX missing LPID handling */
    booke206_flush_tlb(env, -1, 1);
}
/*
 * tlbilx T=1: invalidate all non-protected entries whose TID matches
 * the PID selected by MAS6.SPID, across every TLB array.
 */
void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
    int i, j;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    ppcmas_tlb_t *tlb = env->tlb.tlbm;
    int tlb_size;

    /* XXX missing LPID handling */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        /* advance to the backing storage of the next TLB array */
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env_cpu(env));
}
/*
 * tlbilx T=3: invalidate by PID and effective address, additionally
 * matching the guest-space (MAS5.SGS) and indirect (MAS6.SIND) bits.
 */
void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode then */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            /* skip non-matching, protected, or wrong-kind entries */
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env_cpu(env));
}
/*
 * tlbflush helper: flash-invalidate TLB1 and/or TLB0 as selected by the
 * bits of @type.
 * NOTE(review): the two condition lines were not visible in this chunk;
 * presumably bit 2 selects TLB1 and bit 4 selects TLB0 — confirm against
 * the full file.
 */
void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
{
    int flags = 0;

    if (type & 2) {
        flags |= BOOKE206_FLUSH_TLB1;
    }

    if (type & 4) {
        flags |= BOOKE206_FLUSH_TLB0;
    }

    booke206_flush_tlb(env, flags, 1);
}
/* Process any pending local (this-CPU) TLB flush request. */
void helper_check_tlb_flush_local(CPUPPCState *env)
{
    check_tlb_flush(env, false);
}
/* Process pending TLB flush requests, including global (broadcast) ones. */
void helper_check_tlb_flush_global(CPUPPCState *env)
{
    check_tlb_flush(env, true);
}
3040 /*****************************************************************************/
3042 bool ppc_cpu_tlb_fill(CPUState
*cs
, vaddr addr
, int size
,
3043 MMUAccessType access_type
, int mmu_idx
,
3044 bool probe
, uintptr_t retaddr
)
3046 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
3047 PowerPCCPUClass
*pcc
= POWERPC_CPU_GET_CLASS(cs
);
3048 CPUPPCState
*env
= &cpu
->env
;
3051 if (pcc
->handle_mmu_fault
) {
3052 ret
= pcc
->handle_mmu_fault(cpu
, addr
, access_type
, mmu_idx
);
3054 ret
= cpu_ppc_handle_mmu_fault(env
, addr
, access_type
, mmu_idx
);
3056 if (unlikely(ret
!= 0)) {
3060 raise_exception_err_ra(env
, cs
->exception_index
, env
->error_code
,