2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
23 #include "sysemu/kvm.h"
25 #include "mmu-hash64.h"
26 #include "mmu-hash32.h"
27 #include "exec/exec-all.h"
29 #include "helper_regs.h"
30 #include "qemu/error-report.h"
31 #include "qemu/main-loop.h"
32 #include "qemu/qemu-print.h"
34 #include "mmu-book3s-v3.h"
35 #include "mmu-radix64.h"
38 #include "exec/helper-proto.h"
39 #include "exec/cpu_ldst.h"
41 /* #define DEBUG_MMU */
42 /* #define DEBUG_BATS */
43 /* #define DEBUG_SOFTWARE_TLB */
44 /* #define DUMP_PAGE_TABLES */
45 /* #define FLUSH_ALL_TLBS */
48 # define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
50 # define LOG_MMU_STATE(cpu) do { } while (0)
53 #ifdef DEBUG_SOFTWARE_TLB
54 # define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
56 # define LOG_SWTLB(...) do { } while (0)
60 # define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
62 # define LOG_BATS(...) do { } while (0)
65 /*****************************************************************************/
66 /* PowerPC MMU emulation */
68 /* Context used internally during MMU translations */
69 typedef struct mmu_ctx_t mmu_ctx_t
;
71 hwaddr raddr
; /* Real address */
72 hwaddr eaddr
; /* Effective address */
73 int prot
; /* Protection bits */
74 hwaddr hash
[2]; /* Pagetable hash values */
75 target_ulong ptem
; /* Virtual segment ID | API */
76 int key
; /* Access key */
77 int nx
; /* Non-execute area */
80 /* Common routines used by software and hardware TLBs emulation */
81 static inline int pte_is_valid(target_ulong pte0
)
83 return pte0
& 0x80000000 ? 1 : 0;
86 static inline void pte_invalidate(target_ulong
*pte0
)
91 #define PTE_PTEM_MASK 0x7FFFFFBF
92 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
94 static int pp_check(int key
, int pp
, int nx
)
98 /* Compute access rights */
105 access
|= PAGE_WRITE
;
121 access
= PAGE_READ
| PAGE_WRITE
;
132 static int check_prot(int prot
, MMUAccessType access_type
)
134 return prot
& prot_for_access_type(access_type
) ? 0 : -2;
137 static int ppc6xx_tlb_pte_check(mmu_ctx_t
*ctx
, target_ulong pte0
,
138 target_ulong pte1
, int h
,
139 MMUAccessType access_type
)
141 target_ulong ptem
, mmask
;
142 int access
, ret
, pteh
, ptev
, pp
;
145 /* Check validity and table match */
146 ptev
= pte_is_valid(pte0
);
147 pteh
= (pte0
>> 6) & 1;
148 if (ptev
&& h
== pteh
) {
149 /* Check vsid & api */
150 ptem
= pte0
& PTE_PTEM_MASK
;
151 mmask
= PTE_CHECK_MASK
;
152 pp
= pte1
& 0x00000003;
153 if (ptem
== ctx
->ptem
) {
154 if (ctx
->raddr
!= (hwaddr
)-1ULL) {
155 /* all matches should have equal RPN, WIMG & PP */
156 if ((ctx
->raddr
& mmask
) != (pte1
& mmask
)) {
157 qemu_log_mask(CPU_LOG_MMU
, "Bad RPN/WIMG/PP\n");
161 /* Compute access rights */
162 access
= pp_check(ctx
->key
, pp
, ctx
->nx
);
163 /* Keep the matching PTE information */
166 ret
= check_prot(ctx
->prot
, access_type
);
169 qemu_log_mask(CPU_LOG_MMU
, "PTE access granted !\n");
171 /* Access right violation */
172 qemu_log_mask(CPU_LOG_MMU
, "PTE access rejected\n");
180 static int pte_update_flags(mmu_ctx_t
*ctx
, target_ulong
*pte1p
,
181 int ret
, MMUAccessType access_type
)
185 /* Update page flags */
186 if (!(*pte1p
& 0x00000100)) {
187 /* Update accessed flag */
188 *pte1p
|= 0x00000100;
191 if (!(*pte1p
& 0x00000080)) {
192 if (access_type
== MMU_DATA_STORE
&& ret
== 0) {
193 /* Update changed flag */
194 *pte1p
|= 0x00000080;
197 /* Force page fault for first write access */
198 ctx
->prot
&= ~PAGE_WRITE
;
205 /* Software driven TLB helpers */
206 static inline int ppc6xx_tlb_getnum(CPUPPCState
*env
, target_ulong eaddr
,
207 int way
, int is_code
)
211 /* Select TLB num in a way from address */
212 nr
= (eaddr
>> TARGET_PAGE_BITS
) & (env
->tlb_per_way
- 1);
214 nr
+= env
->tlb_per_way
* way
;
215 /* 6xx have separate TLBs for instructions and data */
216 if (is_code
&& env
->id_tlbs
== 1) {
223 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState
*env
)
228 /* LOG_SWTLB("Invalidate all TLBs\n"); */
229 /* Invalidate all defined software TLB */
231 if (env
->id_tlbs
== 1) {
234 for (nr
= 0; nr
< max
; nr
++) {
235 tlb
= &env
->tlb
.tlb6
[nr
];
236 pte_invalidate(&tlb
->pte0
);
238 tlb_flush(env_cpu(env
));
241 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState
*env
,
243 int is_code
, int match_epn
)
245 #if !defined(FLUSH_ALL_TLBS)
246 CPUState
*cs
= env_cpu(env
);
250 /* Invalidate ITLB + DTLB, all ways */
251 for (way
= 0; way
< env
->nb_ways
; way
++) {
252 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
, is_code
);
253 tlb
= &env
->tlb
.tlb6
[nr
];
254 if (pte_is_valid(tlb
->pte0
) && (match_epn
== 0 || eaddr
== tlb
->EPN
)) {
255 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx
"\n", nr
,
257 pte_invalidate(&tlb
->pte0
);
258 tlb_flush_page(cs
, tlb
->EPN
);
262 /* XXX: PowerPC specification say this is valid as well */
263 ppc6xx_tlb_invalidate_all(env
);
267 static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState
*env
,
268 target_ulong eaddr
, int is_code
)
270 ppc6xx_tlb_invalidate_virt2(env
, eaddr
, is_code
, 0);
274 static void ppc6xx_tlb_store(CPUPPCState
*env
, target_ulong EPN
, int way
,
275 int is_code
, target_ulong pte0
, target_ulong pte1
)
280 nr
= ppc6xx_tlb_getnum(env
, EPN
, way
, is_code
);
281 tlb
= &env
->tlb
.tlb6
[nr
];
282 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
283 " PTE1 " TARGET_FMT_lx
"\n", nr
, env
->nb_tlb
, EPN
, pte0
, pte1
);
284 /* Invalidate any pending reference in QEMU for this virtual address */
285 ppc6xx_tlb_invalidate_virt2(env
, EPN
, is_code
, 1);
289 /* Store last way for LRU mechanism */
294 static int ppc6xx_tlb_check(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
295 target_ulong eaddr
, MMUAccessType access_type
)
302 ret
= -1; /* No TLB found */
303 for (way
= 0; way
< env
->nb_ways
; way
++) {
304 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
, access_type
== MMU_INST_FETCH
);
305 tlb
= &env
->tlb
.tlb6
[nr
];
306 /* This test "emulates" the PTE index match for hardware TLBs */
307 if ((eaddr
& TARGET_PAGE_MASK
) != tlb
->EPN
) {
308 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx
" " TARGET_FMT_lx
309 "] <> " TARGET_FMT_lx
"\n", nr
, env
->nb_tlb
,
310 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
311 tlb
->EPN
, tlb
->EPN
+ TARGET_PAGE_SIZE
, eaddr
);
314 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx
" <> " TARGET_FMT_lx
" "
315 TARGET_FMT_lx
" %c %c\n", nr
, env
->nb_tlb
,
316 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
317 tlb
->EPN
, eaddr
, tlb
->pte1
,
318 access_type
== MMU_DATA_STORE
? 'S' : 'L',
319 access_type
== MMU_INST_FETCH
? 'I' : 'D');
320 switch (ppc6xx_tlb_pte_check(ctx
, tlb
->pte0
, tlb
->pte1
,
323 /* TLB inconsistency */
326 /* Access violation */
337 * XXX: we should go on looping to check all TLBs
338 * consistency but we can speed-up the whole thing as
339 * the result would be undefined if TLBs are not
349 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx
" prot=%01x ret=%d\n",
350 ctx
->raddr
& TARGET_PAGE_MASK
, ctx
->prot
, ret
);
351 /* Update page flags */
352 pte_update_flags(ctx
, &env
->tlb
.tlb6
[best
].pte1
, ret
, access_type
);
358 /* Perform BAT hit & translation */
359 static inline void bat_size_prot(CPUPPCState
*env
, target_ulong
*blp
,
360 int *validp
, int *protp
, target_ulong
*BATu
,
366 bl
= (*BATu
& 0x00001FFC) << 15;
369 if (((msr_pr
== 0) && (*BATu
& 0x00000002)) ||
370 ((msr_pr
!= 0) && (*BATu
& 0x00000001))) {
372 pp
= *BATl
& 0x00000003;
374 prot
= PAGE_READ
| PAGE_EXEC
;
385 static int get_bat_6xx_tlb(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
386 target_ulong
virtual, MMUAccessType access_type
)
388 target_ulong
*BATlt
, *BATut
, *BATu
, *BATl
;
389 target_ulong BEPIl
, BEPIu
, bl
;
392 bool ifetch
= access_type
== MMU_INST_FETCH
;
394 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx
"\n", __func__
,
395 ifetch
? 'I' : 'D', virtual);
397 BATlt
= env
->IBAT
[1];
398 BATut
= env
->IBAT
[0];
400 BATlt
= env
->DBAT
[1];
401 BATut
= env
->DBAT
[0];
403 for (i
= 0; i
< env
->nb_BATs
; i
++) {
406 BEPIu
= *BATu
& 0xF0000000;
407 BEPIl
= *BATu
& 0x0FFE0000;
408 bat_size_prot(env
, &bl
, &valid
, &prot
, BATu
, BATl
);
409 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx
410 " BATl " TARGET_FMT_lx
"\n", __func__
,
411 ifetch
? 'I' : 'D', i
, virtual, *BATu
, *BATl
);
412 if ((virtual & 0xF0000000) == BEPIu
&&
413 ((virtual & 0x0FFE0000) & ~bl
) == BEPIl
) {
416 /* Get physical address */
417 ctx
->raddr
= (*BATl
& 0xF0000000) |
418 ((virtual & 0x0FFE0000 & bl
) | (*BATl
& 0x0FFE0000)) |
419 (virtual & 0x0001F000);
420 /* Compute access rights */
422 ret
= check_prot(ctx
->prot
, access_type
);
424 LOG_BATS("BAT %d match: r " TARGET_FMT_plx
" prot=%c%c\n",
425 i
, ctx
->raddr
, ctx
->prot
& PAGE_READ
? 'R' : '-',
426 ctx
->prot
& PAGE_WRITE
? 'W' : '-');
433 #if defined(DEBUG_BATS)
434 if (qemu_log_enabled()) {
435 LOG_BATS("no BAT match for " TARGET_FMT_lx
":\n", virtual);
436 for (i
= 0; i
< 4; i
++) {
439 BEPIu
= *BATu
& 0xF0000000;
440 BEPIl
= *BATu
& 0x0FFE0000;
441 bl
= (*BATu
& 0x00001FFC) << 15;
442 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx
443 " BATl " TARGET_FMT_lx
"\n\t" TARGET_FMT_lx
" "
444 TARGET_FMT_lx
" " TARGET_FMT_lx
"\n",
445 __func__
, ifetch
? 'I' : 'D', i
, virtual,
446 *BATu
, *BATl
, BEPIu
, BEPIl
, bl
);
455 /* Perform segment based translation */
456 static int get_segment_6xx_tlb(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
457 target_ulong eaddr
, MMUAccessType access_type
,
460 PowerPCCPU
*cpu
= env_archcpu(env
);
463 int ds
, pr
, target_page_bits
;
465 target_ulong sr
, pgidx
;
470 sr
= env
->sr
[eaddr
>> 28];
471 ctx
->key
= (((sr
& 0x20000000) && (pr
!= 0)) ||
472 ((sr
& 0x40000000) && (pr
== 0))) ? 1 : 0;
473 ds
= sr
& 0x80000000 ? 1 : 0;
474 ctx
->nx
= sr
& 0x10000000 ? 1 : 0;
475 vsid
= sr
& 0x00FFFFFF;
476 target_page_bits
= TARGET_PAGE_BITS
;
477 qemu_log_mask(CPU_LOG_MMU
,
478 "Check segment v=" TARGET_FMT_lx
" %d " TARGET_FMT_lx
479 " nip=" TARGET_FMT_lx
" lr=" TARGET_FMT_lx
480 " ir=%d dr=%d pr=%d %d t=%d\n",
481 eaddr
, (int)(eaddr
>> 28), sr
, env
->nip
, env
->lr
, (int)msr_ir
,
482 (int)msr_dr
, pr
!= 0 ? 1 : 0, access_type
== MMU_DATA_STORE
, type
);
483 pgidx
= (eaddr
& ~SEGMENT_MASK_256M
) >> target_page_bits
;
485 ctx
->ptem
= (vsid
<< 7) | (pgidx
>> 10);
487 qemu_log_mask(CPU_LOG_MMU
,
488 "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx
"\n",
489 ctx
->key
, ds
, ctx
->nx
, vsid
);
492 /* Check if instruction fetch is allowed, if needed */
493 if (type
!= ACCESS_CODE
|| ctx
->nx
== 0) {
494 /* Page address translation */
495 qemu_log_mask(CPU_LOG_MMU
, "htab_base " TARGET_FMT_plx
496 " htab_mask " TARGET_FMT_plx
497 " hash " TARGET_FMT_plx
"\n",
498 ppc_hash32_hpt_base(cpu
), ppc_hash32_hpt_mask(cpu
), hash
);
500 ctx
->hash
[1] = ~hash
;
502 /* Initialize real address with an invalid value */
503 ctx
->raddr
= (hwaddr
)-1ULL;
504 /* Software TLB search */
505 ret
= ppc6xx_tlb_check(env
, ctx
, eaddr
, access_type
);
506 #if defined(DUMP_PAGE_TABLES)
507 if (qemu_loglevel_mask(CPU_LOG_MMU
)) {
508 CPUState
*cs
= env_cpu(env
);
510 uint32_t a0
, a1
, a2
, a3
;
512 qemu_log("Page table: " TARGET_FMT_plx
" len " TARGET_FMT_plx
513 "\n", ppc_hash32_hpt_base(cpu
),
514 ppc_hash32_hpt_mask(cpu
) + 0x80);
515 for (curaddr
= ppc_hash32_hpt_base(cpu
);
516 curaddr
< (ppc_hash32_hpt_base(cpu
)
517 + ppc_hash32_hpt_mask(cpu
) + 0x80);
519 a0
= ldl_phys(cs
->as
, curaddr
);
520 a1
= ldl_phys(cs
->as
, curaddr
+ 4);
521 a2
= ldl_phys(cs
->as
, curaddr
+ 8);
522 a3
= ldl_phys(cs
->as
, curaddr
+ 12);
523 if (a0
!= 0 || a1
!= 0 || a2
!= 0 || a3
!= 0) {
524 qemu_log(TARGET_FMT_plx
": %08x %08x %08x %08x\n",
525 curaddr
, a0
, a1
, a2
, a3
);
531 qemu_log_mask(CPU_LOG_MMU
, "No access allowed\n");
537 qemu_log_mask(CPU_LOG_MMU
, "direct store...\n");
538 /* Direct-store segment : absolutely *BUGGY* for now */
541 * Direct-store implies a 32-bit MMU.
542 * Check the Segment Register's bus unit ID (BUID).
544 sr
= env
->sr
[eaddr
>> 28];
545 if ((sr
& 0x1FF00000) >> 20 == 0x07f) {
547 * Memory-forced I/O controller interface access
549 * If T=1 and BUID=x'07F', the 601 performs a memory
550 * access to SR[28-31] LA[4-31], bypassing all protection
553 ctx
->raddr
= ((sr
& 0xF) << 28) | (eaddr
& 0x0FFFFFFF);
554 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
560 /* Integer load/store : only access allowed */
563 /* No code fetch is allowed in direct-store areas */
566 /* Floating point load/store */
569 /* lwarx, ldarx or srwcx. */
573 * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
575 * Should make the instruction do no-op. As it already do
576 * no-op, it's quite easy :-)
584 qemu_log_mask(CPU_LOG_MMU
, "ERROR: instruction should not need "
585 "address translation\n");
588 if ((access_type
== MMU_DATA_STORE
|| ctx
->key
!= 1) &&
589 (access_type
== MMU_DATA_LOAD
|| ctx
->key
!= 0)) {
600 /* Generic TLB check function for embedded PowerPC implementations */
601 static int ppcemb_tlb_check(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
603 target_ulong address
, uint32_t pid
, int ext
,
608 /* Check valid flag */
609 if (!(tlb
->prot
& PAGE_VALID
)) {
612 mask
= ~(tlb
->size
- 1);
613 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx
" PID %u <=> " TARGET_FMT_lx
614 " " TARGET_FMT_lx
" %u %x\n", __func__
, i
, address
, pid
, tlb
->EPN
,
615 mask
, (uint32_t)tlb
->PID
, tlb
->prot
);
617 if (tlb
->PID
!= 0 && tlb
->PID
!= pid
) {
620 /* Check effective address */
621 if ((address
& mask
) != tlb
->EPN
) {
624 *raddrp
= (tlb
->RPN
& mask
) | (address
& ~mask
);
626 /* Extend the physical address to 36 bits */
627 *raddrp
|= (uint64_t)(tlb
->RPN
& 0xF) << 32;
634 /* Generic TLB search function for PowerPC embedded implementations */
635 static int ppcemb_tlb_search(CPUPPCState
*env
, target_ulong address
,
642 /* Default return value is no match */
644 for (i
= 0; i
< env
->nb_tlb
; i
++) {
645 tlb
= &env
->tlb
.tlbe
[i
];
646 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
, pid
, 0, i
) == 0) {
656 /* Helpers specific to PowerPC 40x implementations */
657 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState
*env
)
662 for (i
= 0; i
< env
->nb_tlb
; i
++) {
663 tlb
= &env
->tlb
.tlbe
[i
];
664 tlb
->prot
&= ~PAGE_VALID
;
666 tlb_flush(env_cpu(env
));
669 static int mmu40x_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
670 target_ulong address
,
671 MMUAccessType access_type
)
675 int i
, ret
, zsel
, zpr
, pr
;
678 raddr
= (hwaddr
)-1ULL;
680 for (i
= 0; i
< env
->nb_tlb
; i
++) {
681 tlb
= &env
->tlb
.tlbe
[i
];
682 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
,
683 env
->spr
[SPR_40x_PID
], 0, i
) < 0) {
686 zsel
= (tlb
->attr
>> 4) & 0xF;
687 zpr
= (env
->spr
[SPR_40x_ZPR
] >> (30 - (2 * zsel
))) & 0x3;
688 LOG_SWTLB("%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
689 __func__
, i
, zsel
, zpr
, access_type
, tlb
->attr
);
690 /* Check execute enable bit */
698 /* All accesses granted */
699 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
704 /* Raise Zone protection fault. */
705 env
->spr
[SPR_40x_ESR
] = 1 << 22;
713 /* Check from TLB entry */
714 ctx
->prot
= tlb
->prot
;
715 ret
= check_prot(ctx
->prot
, access_type
);
717 env
->spr
[SPR_40x_ESR
] = 0;
723 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
724 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
729 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
730 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
735 void store_40x_sler(CPUPPCState
*env
, uint32_t val
)
737 /* XXX: TO BE FIXED */
738 if (val
!= 0x00000000) {
739 cpu_abort(env_cpu(env
),
740 "Little-endian regions are not supported by now\n");
742 env
->spr
[SPR_405_SLER
] = val
;
745 static int mmubooke_check_tlb(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
746 hwaddr
*raddr
, int *prot
, target_ulong address
,
747 MMUAccessType access_type
, int i
)
751 if (ppcemb_tlb_check(env
, tlb
, raddr
, address
,
752 env
->spr
[SPR_BOOKE_PID
],
753 !env
->nb_pids
, i
) >= 0) {
757 if (env
->spr
[SPR_BOOKE_PID1
] &&
758 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
759 env
->spr
[SPR_BOOKE_PID1
], 0, i
) >= 0) {
763 if (env
->spr
[SPR_BOOKE_PID2
] &&
764 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
765 env
->spr
[SPR_BOOKE_PID2
], 0, i
) >= 0) {
769 LOG_SWTLB("%s: TLB entry not found\n", __func__
);
775 prot2
= tlb
->prot
& 0xF;
777 prot2
= (tlb
->prot
>> 4) & 0xF;
780 /* Check the address space */
781 if ((access_type
== MMU_INST_FETCH
? msr_ir
: msr_dr
) != (tlb
->attr
& 1)) {
782 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
787 if (prot2
& prot_for_access_type(access_type
)) {
788 LOG_SWTLB("%s: good TLB!\n", __func__
);
792 LOG_SWTLB("%s: no prot match: %x\n", __func__
, prot2
);
793 return access_type
== MMU_INST_FETCH
? -3 : -2;
796 static int mmubooke_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
797 target_ulong address
,
798 MMUAccessType access_type
)
805 raddr
= (hwaddr
)-1ULL;
806 for (i
= 0; i
< env
->nb_tlb
; i
++) {
807 tlb
= &env
->tlb
.tlbe
[i
];
808 ret
= mmubooke_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
,
817 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
818 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
821 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
822 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
829 static void booke206_flush_tlb(CPUPPCState
*env
, int flags
,
830 const int check_iprot
)
834 ppcmas_tlb_t
*tlb
= env
->tlb
.tlbm
;
836 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
837 if (flags
& (1 << i
)) {
838 tlb_size
= booke206_tlb_size(env
, i
);
839 for (j
= 0; j
< tlb_size
; j
++) {
840 if (!check_iprot
|| !(tlb
[j
].mas1
& MAS1_IPROT
)) {
841 tlb
[j
].mas1
&= ~MAS1_VALID
;
845 tlb
+= booke206_tlb_size(env
, i
);
848 tlb_flush(env_cpu(env
));
852 static hwaddr
booke206_tlb_to_page_size(CPUPPCState
*env
,
857 tlbm_size
= (tlb
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
859 return 1024ULL << tlbm_size
;
862 /* TLB check function for MAS based SoftTLBs */
863 static int ppcmas_tlb_check(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
864 hwaddr
*raddrp
, target_ulong address
,
871 /* In 32bit mode we can only address 32bit EAs */
872 address
= (uint32_t)address
;
875 /* Check valid flag */
876 if (!(tlb
->mas1
& MAS1_VALID
)) {
880 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
881 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx
" PID=0x%x MAS1=0x%x MAS2=0x%"
882 PRIx64
" mask=0x%" HWADDR_PRIx
" MAS7_3=0x%" PRIx64
" MAS8=0x%"
883 PRIx32
"\n", __func__
, address
, pid
, tlb
->mas1
, tlb
->mas2
, mask
,
884 tlb
->mas7_3
, tlb
->mas8
);
887 tlb_pid
= (tlb
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
;
888 if (tlb_pid
!= 0 && tlb_pid
!= pid
) {
892 /* Check effective address */
893 if ((address
& mask
) != (tlb
->mas2
& MAS2_EPN_MASK
)) {
898 *raddrp
= (tlb
->mas7_3
& mask
) | (address
& ~mask
);
904 static bool is_epid_mmu(int mmu_idx
)
906 return mmu_idx
== PPC_TLB_EPID_STORE
|| mmu_idx
== PPC_TLB_EPID_LOAD
;
909 static uint32_t mmubooke206_esr(int mmu_idx
, MMUAccessType access_type
)
912 if (access_type
== MMU_DATA_STORE
) {
915 if (is_epid_mmu(mmu_idx
)) {
922 * Get EPID register given the mmu_idx. If this is regular load,
923 * construct the EPID access bits from current processor state
925 * Get the effective AS and PR bits and the PID. The PID is returned
926 * only if EPID load is requested, otherwise the caller must detect
927 * the correct EPID. Return true if valid EPID is returned.
929 static bool mmubooke206_get_as(CPUPPCState
*env
,
930 int mmu_idx
, uint32_t *epid_out
,
931 bool *as_out
, bool *pr_out
)
933 if (is_epid_mmu(mmu_idx
)) {
935 if (mmu_idx
== PPC_TLB_EPID_STORE
) {
936 epidr
= env
->spr
[SPR_BOOKE_EPSC
];
938 epidr
= env
->spr
[SPR_BOOKE_EPLC
];
940 *epid_out
= (epidr
& EPID_EPID
) >> EPID_EPID_SHIFT
;
941 *as_out
= !!(epidr
& EPID_EAS
);
942 *pr_out
= !!(epidr
& EPID_EPR
);
951 /* Check if the tlb found by hashing really matches */
952 static int mmubooke206_check_tlb(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
953 hwaddr
*raddr
, int *prot
,
954 target_ulong address
,
955 MMUAccessType access_type
, int mmu_idx
)
960 bool use_epid
= mmubooke206_get_as(env
, mmu_idx
, &epid
, &as
, &pr
);
963 if (ppcmas_tlb_check(env
, tlb
, raddr
, address
,
964 env
->spr
[SPR_BOOKE_PID
]) >= 0) {
968 if (env
->spr
[SPR_BOOKE_PID1
] &&
969 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
970 env
->spr
[SPR_BOOKE_PID1
]) >= 0) {
974 if (env
->spr
[SPR_BOOKE_PID2
] &&
975 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
976 env
->spr
[SPR_BOOKE_PID2
]) >= 0) {
980 if (ppcmas_tlb_check(env
, tlb
, raddr
, address
, epid
) >= 0) {
985 LOG_SWTLB("%s: TLB entry not found\n", __func__
);
991 if (tlb
->mas7_3
& MAS3_UR
) {
994 if (tlb
->mas7_3
& MAS3_UW
) {
997 if (tlb
->mas7_3
& MAS3_UX
) {
1001 if (tlb
->mas7_3
& MAS3_SR
) {
1004 if (tlb
->mas7_3
& MAS3_SW
) {
1005 prot2
|= PAGE_WRITE
;
1007 if (tlb
->mas7_3
& MAS3_SX
) {
1012 /* Check the address space and permissions */
1013 if (access_type
== MMU_INST_FETCH
) {
1014 /* There is no way to fetch code using epid load */
1019 if (as
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
1020 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1025 if (prot2
& prot_for_access_type(access_type
)) {
1026 LOG_SWTLB("%s: good TLB!\n", __func__
);
1030 LOG_SWTLB("%s: no prot match: %x\n", __func__
, prot2
);
1031 return access_type
== MMU_INST_FETCH
? -3 : -2;
1034 static int mmubooke206_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1035 target_ulong address
,
1036 MMUAccessType access_type
,
1044 raddr
= (hwaddr
)-1ULL;
1046 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1047 int ways
= booke206_tlb_ways(env
, i
);
1049 for (j
= 0; j
< ways
; j
++) {
1050 tlb
= booke206_get_tlbm(env
, i
, address
, j
);
1054 ret
= mmubooke206_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
,
1055 access_type
, mmu_idx
);
1066 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1067 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1070 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1071 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1077 static const char *book3e_tsize_to_str
[32] = {
1078 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
1079 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
1080 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
1084 static void mmubooke_dump_mmu(CPUPPCState
*env
)
1086 ppcemb_tlb_t
*entry
;
1089 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
1090 qemu_printf("Cannot access KVM TLB\n");
1094 qemu_printf("\nTLB:\n");
1095 qemu_printf("Effective Physical Size PID Prot "
1098 entry
= &env
->tlb
.tlbe
[0];
1099 for (i
= 0; i
< env
->nb_tlb
; i
++, entry
++) {
1102 uint64_t size
= (uint64_t)entry
->size
;
1105 /* Check valid flag */
1106 if (!(entry
->prot
& PAGE_VALID
)) {
1110 mask
= ~(entry
->size
- 1);
1111 ea
= entry
->EPN
& mask
;
1112 pa
= entry
->RPN
& mask
;
1113 /* Extend the physical address to 36 bits */
1114 pa
|= (hwaddr
)(entry
->RPN
& 0xF) << 32;
1115 if (size
>= 1 * MiB
) {
1116 snprintf(size_buf
, sizeof(size_buf
), "%3" PRId64
"M", size
/ MiB
);
1118 snprintf(size_buf
, sizeof(size_buf
), "%3" PRId64
"k", size
/ KiB
);
1120 qemu_printf("0x%016" PRIx64
" 0x%016" PRIx64
" %s %-5u %08x %08x\n",
1121 (uint64_t)ea
, (uint64_t)pa
, size_buf
, (uint32_t)entry
->PID
,
1122 entry
->prot
, entry
->attr
);
1127 static void mmubooke206_dump_one_tlb(CPUPPCState
*env
, int tlbn
, int offset
,
1130 ppcmas_tlb_t
*entry
;
1133 qemu_printf("\nTLB%d:\n", tlbn
);
1134 qemu_printf("Effective Physical Size TID TS SRWX"
1135 " URWX WIMGE U0123\n");
1137 entry
= &env
->tlb
.tlbm
[offset
];
1138 for (i
= 0; i
< tlbsize
; i
++, entry
++) {
1139 hwaddr ea
, pa
, size
;
1142 if (!(entry
->mas1
& MAS1_VALID
)) {
1146 tsize
= (entry
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
1147 size
= 1024ULL << tsize
;
1148 ea
= entry
->mas2
& ~(size
- 1);
1149 pa
= entry
->mas7_3
& ~(size
- 1);
1151 qemu_printf("0x%016" PRIx64
" 0x%016" PRIx64
" %4s %-5u %1u S%c%c%c"
1152 "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
1153 (uint64_t)ea
, (uint64_t)pa
,
1154 book3e_tsize_to_str
[tsize
],
1155 (entry
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
,
1156 (entry
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
,
1157 entry
->mas7_3
& MAS3_SR
? 'R' : '-',
1158 entry
->mas7_3
& MAS3_SW
? 'W' : '-',
1159 entry
->mas7_3
& MAS3_SX
? 'X' : '-',
1160 entry
->mas7_3
& MAS3_UR
? 'R' : '-',
1161 entry
->mas7_3
& MAS3_UW
? 'W' : '-',
1162 entry
->mas7_3
& MAS3_UX
? 'X' : '-',
1163 entry
->mas2
& MAS2_W
? 'W' : '-',
1164 entry
->mas2
& MAS2_I
? 'I' : '-',
1165 entry
->mas2
& MAS2_M
? 'M' : '-',
1166 entry
->mas2
& MAS2_G
? 'G' : '-',
1167 entry
->mas2
& MAS2_E
? 'E' : '-',
1168 entry
->mas7_3
& MAS3_U0
? '0' : '-',
1169 entry
->mas7_3
& MAS3_U1
? '1' : '-',
1170 entry
->mas7_3
& MAS3_U2
? '2' : '-',
1171 entry
->mas7_3
& MAS3_U3
? '3' : '-');
1175 static void mmubooke206_dump_mmu(CPUPPCState
*env
)
1180 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
1181 qemu_printf("Cannot access KVM TLB\n");
1185 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1186 int size
= booke206_tlb_size(env
, i
);
1192 mmubooke206_dump_one_tlb(env
, i
, offset
, size
);
1197 static void mmu6xx_dump_BATs(CPUPPCState
*env
, int type
)
1199 target_ulong
*BATlt
, *BATut
, *BATu
, *BATl
;
1200 target_ulong BEPIl
, BEPIu
, bl
;
1205 BATlt
= env
->IBAT
[1];
1206 BATut
= env
->IBAT
[0];
1209 BATlt
= env
->DBAT
[1];
1210 BATut
= env
->DBAT
[0];
1214 for (i
= 0; i
< env
->nb_BATs
; i
++) {
1217 BEPIu
= *BATu
& 0xF0000000;
1218 BEPIl
= *BATu
& 0x0FFE0000;
1219 bl
= (*BATu
& 0x00001FFC) << 15;
1220 qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
1221 " BATl " TARGET_FMT_lx
"\n\t" TARGET_FMT_lx
" "
1222 TARGET_FMT_lx
" " TARGET_FMT_lx
"\n",
1223 type
== ACCESS_CODE
? "code" : "data", i
,
1224 *BATu
, *BATl
, BEPIu
, BEPIl
, bl
);
1228 static void mmu6xx_dump_mmu(CPUPPCState
*env
)
1230 PowerPCCPU
*cpu
= env_archcpu(env
);
1233 int type
, way
, entry
, i
;
1235 qemu_printf("HTAB base = 0x%"HWADDR_PRIx
"\n", ppc_hash32_hpt_base(cpu
));
1236 qemu_printf("HTAB mask = 0x%"HWADDR_PRIx
"\n", ppc_hash32_hpt_mask(cpu
));
1238 qemu_printf("\nSegment registers:\n");
1239 for (i
= 0; i
< 32; i
++) {
1241 if (sr
& 0x80000000) {
1242 qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
1243 "CNTLR_SPEC=0x%05x\n", i
,
1244 sr
& 0x80000000 ? 1 : 0, sr
& 0x40000000 ? 1 : 0,
1245 sr
& 0x20000000 ? 1 : 0, (uint32_t)((sr
>> 20) & 0x1FF),
1246 (uint32_t)(sr
& 0xFFFFF));
1248 qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i
,
1249 sr
& 0x80000000 ? 1 : 0, sr
& 0x40000000 ? 1 : 0,
1250 sr
& 0x20000000 ? 1 : 0, sr
& 0x10000000 ? 1 : 0,
1251 (uint32_t)(sr
& 0x00FFFFFF));
1255 qemu_printf("\nBATs:\n");
1256 mmu6xx_dump_BATs(env
, ACCESS_INT
);
1257 mmu6xx_dump_BATs(env
, ACCESS_CODE
);
1259 if (env
->id_tlbs
!= 1) {
1260 qemu_printf("ERROR: 6xx MMU should have separated TLB"
1261 " for code and data\n");
1264 qemu_printf("\nTLBs [EPN EPN + SIZE]\n");
1266 for (type
= 0; type
< 2; type
++) {
1267 for (way
= 0; way
< env
->nb_ways
; way
++) {
1268 for (entry
= env
->nb_tlb
* type
+ env
->tlb_per_way
* way
;
1269 entry
< (env
->nb_tlb
* type
+ env
->tlb_per_way
* (way
+ 1));
1272 tlb
= &env
->tlb
.tlb6
[entry
];
1273 qemu_printf("%s TLB %02d/%02d way:%d %s ["
1274 TARGET_FMT_lx
" " TARGET_FMT_lx
"]\n",
1275 type
? "code" : "data", entry
% env
->nb_tlb
,
1277 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
1278 tlb
->EPN
, tlb
->EPN
+ TARGET_PAGE_SIZE
);
1284 void dump_mmu(CPUPPCState
*env
)
1286 switch (env
->mmu_model
) {
1287 case POWERPC_MMU_BOOKE
:
1288 mmubooke_dump_mmu(env
);
1290 case POWERPC_MMU_BOOKE206
:
1291 mmubooke206_dump_mmu(env
);
1293 case POWERPC_MMU_SOFT_6xx
:
1294 case POWERPC_MMU_SOFT_74xx
:
1295 mmu6xx_dump_mmu(env
);
1297 #if defined(TARGET_PPC64)
1298 case POWERPC_MMU_64B
:
1299 case POWERPC_MMU_2_03
:
1300 case POWERPC_MMU_2_06
:
1301 case POWERPC_MMU_2_07
:
1302 dump_slb(env_archcpu(env
));
1304 case POWERPC_MMU_3_00
:
1305 if (ppc64_v3_radix(env_archcpu(env
))) {
1306 qemu_log_mask(LOG_UNIMP
, "%s: the PPC64 MMU is unsupported\n",
1309 dump_slb(env_archcpu(env
));
1314 qemu_log_mask(LOG_UNIMP
, "%s: unimplemented\n", __func__
);
1318 static int check_physical(CPUPPCState
*env
, mmu_ctx_t
*ctx
, target_ulong eaddr
,
1319 MMUAccessType access_type
)
1324 ctx
->prot
= PAGE_READ
| PAGE_EXEC
;
1326 switch (env
->mmu_model
) {
1327 case POWERPC_MMU_SOFT_6xx
:
1328 case POWERPC_MMU_SOFT_74xx
:
1329 case POWERPC_MMU_SOFT_4xx
:
1330 case POWERPC_MMU_REAL
:
1331 case POWERPC_MMU_BOOKE
:
1332 ctx
->prot
|= PAGE_WRITE
;
1335 case POWERPC_MMU_SOFT_4xx_Z
:
1336 if (unlikely(msr_pe
!= 0)) {
1338 * 403 family add some particular protections, using
1339 * PBL/PBU registers for accesses with no translation.
1342 /* Check PLB validity */
1343 (env
->pb
[0] < env
->pb
[1] &&
1344 /* and address in plb area */
1345 eaddr
>= env
->pb
[0] && eaddr
< env
->pb
[1]) ||
1346 (env
->pb
[2] < env
->pb
[3] &&
1347 eaddr
>= env
->pb
[2] && eaddr
< env
->pb
[3]) ? 1 : 0;
1348 if (in_plb
^ msr_px
) {
1349 /* Access in protected area */
1350 if (access_type
== MMU_DATA_STORE
) {
1351 /* Access is not allowed */
1355 /* Read-write access is allowed */
1356 ctx
->prot
|= PAGE_WRITE
;
1362 /* Caller's checks mean we should never get here for other models */
1370 static int get_physical_address_wtlb(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1372 MMUAccessType access_type
, int type
,
1376 bool real_mode
= (type
== ACCESS_CODE
&& msr_ir
== 0)
1377 || (type
!= ACCESS_CODE
&& msr_dr
== 0);
1379 switch (env
->mmu_model
) {
1380 case POWERPC_MMU_SOFT_6xx
:
1381 case POWERPC_MMU_SOFT_74xx
:
1383 ret
= check_physical(env
, ctx
, eaddr
, access_type
);
1385 /* Try to find a BAT */
1386 if (env
->nb_BATs
!= 0) {
1387 ret
= get_bat_6xx_tlb(env
, ctx
, eaddr
, access_type
);
1390 /* We didn't match any BAT entry or don't have BATs */
1391 ret
= get_segment_6xx_tlb(env
, ctx
, eaddr
, access_type
, type
);
1396 case POWERPC_MMU_SOFT_4xx
:
1397 case POWERPC_MMU_SOFT_4xx_Z
:
1399 ret
= check_physical(env
, ctx
, eaddr
, access_type
);
1401 ret
= mmu40x_get_physical_address(env
, ctx
, eaddr
, access_type
);
1404 case POWERPC_MMU_BOOKE
:
1405 ret
= mmubooke_get_physical_address(env
, ctx
, eaddr
, access_type
);
1407 case POWERPC_MMU_BOOKE206
:
1408 ret
= mmubooke206_get_physical_address(env
, ctx
, eaddr
, access_type
,
1411 case POWERPC_MMU_MPC8xx
:
1413 cpu_abort(env_cpu(env
), "MPC8xx MMU model is not implemented\n");
1415 case POWERPC_MMU_REAL
:
1417 ret
= check_physical(env
, ctx
, eaddr
, access_type
);
1419 cpu_abort(env_cpu(env
),
1420 "PowerPC in real mode do not do any translation\n");
1424 cpu_abort(env_cpu(env
), "Unknown or invalid MMU model\n");
1432 static int get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1433 target_ulong eaddr
, MMUAccessType access_type
,
1436 return get_physical_address_wtlb(env
, ctx
, eaddr
, access_type
, type
, 0);
1440 static void booke206_update_mas_tlb_miss(CPUPPCState
*env
, target_ulong address
,
1441 MMUAccessType access_type
, int mmu_idx
)
1445 uint32_t missed_tid
= 0;
1446 bool use_epid
= mmubooke206_get_as(env
, mmu_idx
, &epid
, &as
, &pr
);
1448 if (access_type
== MMU_INST_FETCH
) {
1451 env
->spr
[SPR_BOOKE_MAS0
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TLBSELD_MASK
;
1452 env
->spr
[SPR_BOOKE_MAS1
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TSIZED_MASK
;
1453 env
->spr
[SPR_BOOKE_MAS2
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_WIMGED_MASK
;
1454 env
->spr
[SPR_BOOKE_MAS3
] = 0;
1455 env
->spr
[SPR_BOOKE_MAS6
] = 0;
1456 env
->spr
[SPR_BOOKE_MAS7
] = 0;
1460 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_TS
;
1461 env
->spr
[SPR_BOOKE_MAS6
] |= MAS6_SAS
;
1464 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_VALID
;
1465 env
->spr
[SPR_BOOKE_MAS2
] |= address
& MAS2_EPN_MASK
;
1468 switch (env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TIDSELD_PIDZ
) {
1469 case MAS4_TIDSELD_PID0
:
1470 missed_tid
= env
->spr
[SPR_BOOKE_PID
];
1472 case MAS4_TIDSELD_PID1
:
1473 missed_tid
= env
->spr
[SPR_BOOKE_PID1
];
1475 case MAS4_TIDSELD_PID2
:
1476 missed_tid
= env
->spr
[SPR_BOOKE_PID2
];
1479 env
->spr
[SPR_BOOKE_MAS6
] |= env
->spr
[SPR_BOOKE_PID
] << 16;
1482 env
->spr
[SPR_BOOKE_MAS6
] |= missed_tid
<< 16;
1484 env
->spr
[SPR_BOOKE_MAS1
] |= (missed_tid
<< MAS1_TID_SHIFT
);
1487 /* next victim logic */
1488 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_ESEL_SHIFT
;
1490 env
->last_way
&= booke206_tlb_ways(env
, 0) - 1;
1491 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
1494 /* Perform address translation */
1495 /* TODO: Split this by mmu_model. */
1496 static bool ppc_jumbo_xlate(PowerPCCPU
*cpu
, vaddr eaddr
,
1497 MMUAccessType access_type
,
1498 hwaddr
*raddrp
, int *psizep
, int *protp
,
1499 int mmu_idx
, bool guest_visible
)
1501 CPUState
*cs
= CPU(cpu
);
1502 CPUPPCState
*env
= &cpu
->env
;
1507 if (access_type
== MMU_INST_FETCH
) {
1510 } else if (guest_visible
) {
1512 type
= env
->access_type
;
1517 ret
= get_physical_address_wtlb(env
, &ctx
, eaddr
, access_type
,
1520 *raddrp
= ctx
.raddr
;
1522 *psizep
= TARGET_PAGE_BITS
;
1526 if (guest_visible
) {
1528 if (type
== ACCESS_CODE
) {
1531 /* No matches in page tables or TLB */
1532 switch (env
->mmu_model
) {
1533 case POWERPC_MMU_SOFT_6xx
:
1534 cs
->exception_index
= POWERPC_EXCP_IFTLB
;
1535 env
->error_code
= 1 << 18;
1536 env
->spr
[SPR_IMISS
] = eaddr
;
1537 env
->spr
[SPR_ICMP
] = 0x80000000 | ctx
.ptem
;
1539 case POWERPC_MMU_SOFT_74xx
:
1540 cs
->exception_index
= POWERPC_EXCP_IFTLB
;
1542 case POWERPC_MMU_SOFT_4xx
:
1543 case POWERPC_MMU_SOFT_4xx_Z
:
1544 cs
->exception_index
= POWERPC_EXCP_ITLB
;
1545 env
->error_code
= 0;
1546 env
->spr
[SPR_40x_DEAR
] = eaddr
;
1547 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1549 case POWERPC_MMU_BOOKE206
:
1550 booke206_update_mas_tlb_miss(env
, eaddr
, 2, mmu_idx
);
1552 case POWERPC_MMU_BOOKE
:
1553 cs
->exception_index
= POWERPC_EXCP_ITLB
;
1554 env
->error_code
= 0;
1555 env
->spr
[SPR_BOOKE_DEAR
] = eaddr
;
1556 env
->spr
[SPR_BOOKE_ESR
] = mmubooke206_esr(mmu_idx
, MMU_DATA_LOAD
);
1558 case POWERPC_MMU_MPC8xx
:
1559 cpu_abort(cs
, "MPC8xx MMU model is not implemented\n");
1560 case POWERPC_MMU_REAL
:
1561 cpu_abort(cs
, "PowerPC in real mode should never raise "
1562 "any MMU exceptions\n");
1564 cpu_abort(cs
, "Unknown or invalid MMU model\n");
1568 /* Access rights violation */
1569 cs
->exception_index
= POWERPC_EXCP_ISI
;
1570 env
->error_code
= 0x08000000;
1573 /* No execute protection violation */
1574 if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1575 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1576 env
->spr
[SPR_BOOKE_ESR
] = 0x00000000;
1578 cs
->exception_index
= POWERPC_EXCP_ISI
;
1579 env
->error_code
= 0x10000000;
1582 /* Direct store exception */
1583 /* No code fetch is allowed in direct-store areas */
1584 cs
->exception_index
= POWERPC_EXCP_ISI
;
1585 env
->error_code
= 0x10000000;
1591 /* No matches in page tables or TLB */
1592 switch (env
->mmu_model
) {
1593 case POWERPC_MMU_SOFT_6xx
:
1594 if (access_type
== MMU_DATA_STORE
) {
1595 cs
->exception_index
= POWERPC_EXCP_DSTLB
;
1596 env
->error_code
= 1 << 16;
1598 cs
->exception_index
= POWERPC_EXCP_DLTLB
;
1599 env
->error_code
= 0;
1601 env
->spr
[SPR_DMISS
] = eaddr
;
1602 env
->spr
[SPR_DCMP
] = 0x80000000 | ctx
.ptem
;
1604 env
->error_code
|= ctx
.key
<< 19;
1605 env
->spr
[SPR_HASH1
] = ppc_hash32_hpt_base(cpu
) +
1606 get_pteg_offset32(cpu
, ctx
.hash
[0]);
1607 env
->spr
[SPR_HASH2
] = ppc_hash32_hpt_base(cpu
) +
1608 get_pteg_offset32(cpu
, ctx
.hash
[1]);
1610 case POWERPC_MMU_SOFT_74xx
:
1611 if (access_type
== MMU_DATA_STORE
) {
1612 cs
->exception_index
= POWERPC_EXCP_DSTLB
;
1614 cs
->exception_index
= POWERPC_EXCP_DLTLB
;
1617 /* Implement LRU algorithm */
1618 env
->error_code
= ctx
.key
<< 19;
1619 env
->spr
[SPR_TLBMISS
] = (eaddr
& ~((target_ulong
)0x3)) |
1620 ((env
->last_way
+ 1) & (env
->nb_ways
- 1));
1621 env
->spr
[SPR_PTEHI
] = 0x80000000 | ctx
.ptem
;
1623 case POWERPC_MMU_SOFT_4xx
:
1624 case POWERPC_MMU_SOFT_4xx_Z
:
1625 cs
->exception_index
= POWERPC_EXCP_DTLB
;
1626 env
->error_code
= 0;
1627 env
->spr
[SPR_40x_DEAR
] = eaddr
;
1628 if (access_type
== MMU_DATA_STORE
) {
1629 env
->spr
[SPR_40x_ESR
] = 0x00800000;
1631 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1634 case POWERPC_MMU_MPC8xx
:
1636 cpu_abort(cs
, "MPC8xx MMU model is not implemented\n");
1637 case POWERPC_MMU_BOOKE206
:
1638 booke206_update_mas_tlb_miss(env
, eaddr
, access_type
, mmu_idx
);
1640 case POWERPC_MMU_BOOKE
:
1641 cs
->exception_index
= POWERPC_EXCP_DTLB
;
1642 env
->error_code
= 0;
1643 env
->spr
[SPR_BOOKE_DEAR
] = eaddr
;
1644 env
->spr
[SPR_BOOKE_ESR
] = mmubooke206_esr(mmu_idx
, access_type
);
1646 case POWERPC_MMU_REAL
:
1647 cpu_abort(cs
, "PowerPC in real mode should never raise "
1648 "any MMU exceptions\n");
1650 cpu_abort(cs
, "Unknown or invalid MMU model\n");
1654 /* Access rights violation */
1655 cs
->exception_index
= POWERPC_EXCP_DSI
;
1656 env
->error_code
= 0;
1657 if (env
->mmu_model
== POWERPC_MMU_SOFT_4xx
1658 || env
->mmu_model
== POWERPC_MMU_SOFT_4xx_Z
) {
1659 env
->spr
[SPR_40x_DEAR
] = eaddr
;
1660 if (access_type
== MMU_DATA_STORE
) {
1661 env
->spr
[SPR_40x_ESR
] |= 0x00800000;
1663 } else if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1664 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1665 env
->spr
[SPR_BOOKE_DEAR
] = eaddr
;
1666 env
->spr
[SPR_BOOKE_ESR
] = mmubooke206_esr(mmu_idx
, access_type
);
1668 env
->spr
[SPR_DAR
] = eaddr
;
1669 if (access_type
== MMU_DATA_STORE
) {
1670 env
->spr
[SPR_DSISR
] = 0x0A000000;
1672 env
->spr
[SPR_DSISR
] = 0x08000000;
1677 /* Direct store exception */
1680 /* Floating point load/store */
1681 cs
->exception_index
= POWERPC_EXCP_ALIGN
;
1682 env
->error_code
= POWERPC_EXCP_ALIGN_FP
;
1683 env
->spr
[SPR_DAR
] = eaddr
;
1686 /* lwarx, ldarx or stwcx. */
1687 cs
->exception_index
= POWERPC_EXCP_DSI
;
1688 env
->error_code
= 0;
1689 env
->spr
[SPR_DAR
] = eaddr
;
1690 if (access_type
== MMU_DATA_STORE
) {
1691 env
->spr
[SPR_DSISR
] = 0x06000000;
1693 env
->spr
[SPR_DSISR
] = 0x04000000;
1697 /* eciwx or ecowx */
1698 cs
->exception_index
= POWERPC_EXCP_DSI
;
1699 env
->error_code
= 0;
1700 env
->spr
[SPR_DAR
] = eaddr
;
1701 if (access_type
== MMU_DATA_STORE
) {
1702 env
->spr
[SPR_DSISR
] = 0x06100000;
1704 env
->spr
[SPR_DSISR
] = 0x04100000;
1708 printf("DSI: invalid exception (%d)\n", ret
);
1709 cs
->exception_index
= POWERPC_EXCP_PROGRAM
;
1711 POWERPC_EXCP_INVAL
| POWERPC_EXCP_INVAL_INVAL
;
1712 env
->spr
[SPR_DAR
] = eaddr
;
1723 /*****************************************************************************/
1724 /* BATs management */
1725 #if !defined(FLUSH_ALL_TLBS)
1726 static inline void do_invalidate_BAT(CPUPPCState
*env
, target_ulong BATu
,
1729 CPUState
*cs
= env_cpu(env
);
1730 target_ulong base
, end
, page
;
1732 base
= BATu
& ~0x0001FFFF;
1733 end
= base
+ mask
+ 0x00020000;
1734 if (((end
- base
) >> TARGET_PAGE_BITS
) > 1024) {
1735 /* Flushing 1024 4K pages is slower than a complete flush */
1736 LOG_BATS("Flush all BATs\n");
1738 LOG_BATS("Flush done\n");
1741 LOG_BATS("Flush BAT from " TARGET_FMT_lx
" to " TARGET_FMT_lx
" ("
1742 TARGET_FMT_lx
")\n", base
, end
, mask
);
1743 for (page
= base
; page
!= end
; page
+= TARGET_PAGE_SIZE
) {
1744 tlb_flush_page(cs
, page
);
1746 LOG_BATS("Flush done\n");
1750 static inline void dump_store_bat(CPUPPCState
*env
, char ID
, int ul
, int nr
,
1753 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx
" (" TARGET_FMT_lx
")\n", ID
,
1754 nr
, ul
== 0 ? 'u' : 'l', value
, env
->nip
);
1757 void helper_store_ibatu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1761 dump_store_bat(env
, 'I', 0, nr
, value
);
1762 if (env
->IBAT
[0][nr
] != value
) {
1763 mask
= (value
<< 15) & 0x0FFE0000UL
;
1764 #if !defined(FLUSH_ALL_TLBS)
1765 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1768 * When storing valid upper BAT, mask BEPI and BRPN and
1769 * invalidate all TLBs covered by this BAT
1771 mask
= (value
<< 15) & 0x0FFE0000UL
;
1772 env
->IBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
1773 (value
& ~0x0001FFFFUL
& ~mask
);
1774 env
->IBAT
[1][nr
] = (env
->IBAT
[1][nr
] & 0x0000007B) |
1775 (env
->IBAT
[1][nr
] & ~0x0001FFFF & ~mask
);
1776 #if !defined(FLUSH_ALL_TLBS)
1777 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1779 tlb_flush(env_cpu(env
));
1784 void helper_store_ibatl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1786 dump_store_bat(env
, 'I', 1, nr
, value
);
1787 env
->IBAT
[1][nr
] = value
;
1790 void helper_store_dbatu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1794 dump_store_bat(env
, 'D', 0, nr
, value
);
1795 if (env
->DBAT
[0][nr
] != value
) {
1797 * When storing valid upper BAT, mask BEPI and BRPN and
1798 * invalidate all TLBs covered by this BAT
1800 mask
= (value
<< 15) & 0x0FFE0000UL
;
1801 #if !defined(FLUSH_ALL_TLBS)
1802 do_invalidate_BAT(env
, env
->DBAT
[0][nr
], mask
);
1804 mask
= (value
<< 15) & 0x0FFE0000UL
;
1805 env
->DBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
1806 (value
& ~0x0001FFFFUL
& ~mask
);
1807 env
->DBAT
[1][nr
] = (env
->DBAT
[1][nr
] & 0x0000007B) |
1808 (env
->DBAT
[1][nr
] & ~0x0001FFFF & ~mask
);
1809 #if !defined(FLUSH_ALL_TLBS)
1810 do_invalidate_BAT(env
, env
->DBAT
[0][nr
], mask
);
1812 tlb_flush(env_cpu(env
));
1817 void helper_store_dbatl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1819 dump_store_bat(env
, 'D', 1, nr
, value
);
1820 env
->DBAT
[1][nr
] = value
;
1823 void helper_store_601_batu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1826 #if defined(FLUSH_ALL_TLBS)
1830 dump_store_bat(env
, 'I', 0, nr
, value
);
1831 if (env
->IBAT
[0][nr
] != value
) {
1832 #if defined(FLUSH_ALL_TLBS)
1835 mask
= (env
->IBAT
[1][nr
] << 17) & 0x0FFE0000UL
;
1836 if (env
->IBAT
[1][nr
] & 0x40) {
1837 /* Invalidate BAT only if it is valid */
1838 #if !defined(FLUSH_ALL_TLBS)
1839 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1845 * When storing valid upper BAT, mask BEPI and BRPN and
1846 * invalidate all TLBs covered by this BAT
1848 env
->IBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
1849 (value
& ~0x0001FFFFUL
& ~mask
);
1850 env
->DBAT
[0][nr
] = env
->IBAT
[0][nr
];
1851 if (env
->IBAT
[1][nr
] & 0x40) {
1852 #if !defined(FLUSH_ALL_TLBS)
1853 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1858 #if defined(FLUSH_ALL_TLBS)
1860 tlb_flush(env_cpu(env
));
1866 void helper_store_601_batl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
1868 #if !defined(FLUSH_ALL_TLBS)
1874 dump_store_bat(env
, 'I', 1, nr
, value
);
1875 if (env
->IBAT
[1][nr
] != value
) {
1876 #if defined(FLUSH_ALL_TLBS)
1879 if (env
->IBAT
[1][nr
] & 0x40) {
1880 #if !defined(FLUSH_ALL_TLBS)
1881 mask
= (env
->IBAT
[1][nr
] << 17) & 0x0FFE0000UL
;
1882 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1888 #if !defined(FLUSH_ALL_TLBS)
1889 mask
= (value
<< 17) & 0x0FFE0000UL
;
1890 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
1895 env
->IBAT
[1][nr
] = value
;
1896 env
->DBAT
[1][nr
] = value
;
1897 #if defined(FLUSH_ALL_TLBS)
1899 tlb_flush(env_cpu(env
));
1907 /*****************************************************************************/
1908 /* TLB management */
1909 void ppc_tlb_invalidate_all(CPUPPCState
*env
)
1911 #if defined(TARGET_PPC64)
1912 if (mmu_is_64bit(env
->mmu_model
)) {
1913 env
->tlb_need_flush
= 0;
1914 tlb_flush(env_cpu(env
));
1916 #endif /* defined(TARGET_PPC64) */
1917 switch (env
->mmu_model
) {
1918 case POWERPC_MMU_SOFT_6xx
:
1919 case POWERPC_MMU_SOFT_74xx
:
1920 ppc6xx_tlb_invalidate_all(env
);
1922 case POWERPC_MMU_SOFT_4xx
:
1923 case POWERPC_MMU_SOFT_4xx_Z
:
1924 ppc4xx_tlb_invalidate_all(env
);
1926 case POWERPC_MMU_REAL
:
1927 cpu_abort(env_cpu(env
), "No TLB for PowerPC 4xx in real mode\n");
1929 case POWERPC_MMU_MPC8xx
:
1931 cpu_abort(env_cpu(env
), "MPC8xx MMU model is not implemented\n");
1933 case POWERPC_MMU_BOOKE
:
1934 tlb_flush(env_cpu(env
));
1936 case POWERPC_MMU_BOOKE206
:
1937 booke206_flush_tlb(env
, -1, 0);
1939 case POWERPC_MMU_32B
:
1940 case POWERPC_MMU_601
:
1941 env
->tlb_need_flush
= 0;
1942 tlb_flush(env_cpu(env
));
1946 cpu_abort(env_cpu(env
), "Unknown MMU model %x\n", env
->mmu_model
);
1953 void ppc_tlb_invalidate_one(CPUPPCState
*env
, target_ulong addr
)
1955 #if !defined(FLUSH_ALL_TLBS)
1956 addr
&= TARGET_PAGE_MASK
;
1957 #if defined(TARGET_PPC64)
1958 if (mmu_is_64bit(env
->mmu_model
)) {
1959 /* tlbie invalidate TLBs for all segments */
1961 * XXX: given the fact that there are too many segments to invalidate,
1962 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
1963 * we just invalidate all TLBs
1965 env
->tlb_need_flush
|= TLB_NEED_LOCAL_FLUSH
;
1967 #endif /* defined(TARGET_PPC64) */
1968 switch (env
->mmu_model
) {
1969 case POWERPC_MMU_SOFT_6xx
:
1970 case POWERPC_MMU_SOFT_74xx
:
1971 ppc6xx_tlb_invalidate_virt(env
, addr
, 0);
1972 if (env
->id_tlbs
== 1) {
1973 ppc6xx_tlb_invalidate_virt(env
, addr
, 1);
1976 case POWERPC_MMU_32B
:
1977 case POWERPC_MMU_601
:
1979 * Actual CPUs invalidate entire congruence classes based on
1980 * the geometry of their TLBs and some OSes take that into
1981 * account, we just mark the TLB to be flushed later (context
1982 * synchronizing event or sync instruction on 32-bit).
1984 env
->tlb_need_flush
|= TLB_NEED_LOCAL_FLUSH
;
1987 /* Should never reach here with other MMU models */
1991 ppc_tlb_invalidate_all(env
);
1995 /*****************************************************************************/
1996 /* Special registers manipulation */
1998 /* Segment registers load and store */
1999 target_ulong
helper_load_sr(CPUPPCState
*env
, target_ulong sr_num
)
2001 #if defined(TARGET_PPC64)
2002 if (mmu_is_64bit(env
->mmu_model
)) {
2007 return env
->sr
[sr_num
];
2010 void helper_store_sr(CPUPPCState
*env
, target_ulong srnum
, target_ulong value
)
2012 qemu_log_mask(CPU_LOG_MMU
,
2013 "%s: reg=%d " TARGET_FMT_lx
" " TARGET_FMT_lx
"\n", __func__
,
2014 (int)srnum
, value
, env
->sr
[srnum
]);
2015 #if defined(TARGET_PPC64)
2016 if (mmu_is_64bit(env
->mmu_model
)) {
2017 PowerPCCPU
*cpu
= env_archcpu(env
);
2018 uint64_t esid
, vsid
;
2021 esid
= ((uint64_t)(srnum
& 0xf) << 28) | SLB_ESID_V
;
2024 vsid
= (value
& 0xfffffff) << 12;
2026 vsid
|= ((value
>> 27) & 0xf) << 8;
2028 ppc_store_slb(cpu
, srnum
, esid
, vsid
);
2031 if (env
->sr
[srnum
] != value
) {
2032 env
->sr
[srnum
] = value
;
2034 * Invalidating 256MB of virtual memory in 4kB pages is way
2035 * longer than flushing the whole TLB.
2037 #if !defined(FLUSH_ALL_TLBS) && 0
2039 target_ulong page
, end
;
2040 /* Invalidate 256 MB of virtual memory */
2041 page
= (16 << 20) * srnum
;
2042 end
= page
+ (16 << 20);
2043 for (; page
!= end
; page
+= TARGET_PAGE_SIZE
) {
2044 tlb_flush_page(env_cpu(env
), page
);
2048 env
->tlb_need_flush
|= TLB_NEED_LOCAL_FLUSH
;
2053 /* TLB management */
2054 void helper_tlbia(CPUPPCState
*env
)
2056 ppc_tlb_invalidate_all(env
);
2059 void helper_tlbie(CPUPPCState
*env
, target_ulong addr
)
2061 ppc_tlb_invalidate_one(env
, addr
);
2064 void helper_tlbiva(CPUPPCState
*env
, target_ulong addr
)
2066 /* tlbiva instruction only exists on BookE */
2067 assert(env
->mmu_model
== POWERPC_MMU_BOOKE
);
2069 cpu_abort(env_cpu(env
), "BookE MMU model is not implemented\n");
2072 /* Software driven TLBs management */
2073 /* PowerPC 602/603 software TLB load instructions helpers */
2074 static void do_6xx_tlb(CPUPPCState
*env
, target_ulong new_EPN
, int is_code
)
2076 target_ulong RPN
, CMP
, EPN
;
2079 RPN
= env
->spr
[SPR_RPA
];
2081 CMP
= env
->spr
[SPR_ICMP
];
2082 EPN
= env
->spr
[SPR_IMISS
];
2084 CMP
= env
->spr
[SPR_DCMP
];
2085 EPN
= env
->spr
[SPR_DMISS
];
2087 way
= (env
->spr
[SPR_SRR1
] >> 17) & 1;
2088 (void)EPN
; /* avoid a compiler warning */
2089 LOG_SWTLB("%s: EPN " TARGET_FMT_lx
" " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
2090 " PTE1 " TARGET_FMT_lx
" way %d\n", __func__
, new_EPN
, EPN
, CMP
,
2092 /* Store this TLB */
2093 ppc6xx_tlb_store(env
, (uint32_t)(new_EPN
& TARGET_PAGE_MASK
),
2094 way
, is_code
, CMP
, RPN
);
2097 void helper_6xx_tlbd(CPUPPCState
*env
, target_ulong EPN
)
2099 do_6xx_tlb(env
, EPN
, 0);
2102 void helper_6xx_tlbi(CPUPPCState
*env
, target_ulong EPN
)
2104 do_6xx_tlb(env
, EPN
, 1);
2107 /* PowerPC 74xx software TLB load instructions helpers */
2108 static void do_74xx_tlb(CPUPPCState
*env
, target_ulong new_EPN
, int is_code
)
2110 target_ulong RPN
, CMP
, EPN
;
2113 RPN
= env
->spr
[SPR_PTELO
];
2114 CMP
= env
->spr
[SPR_PTEHI
];
2115 EPN
= env
->spr
[SPR_TLBMISS
] & ~0x3;
2116 way
= env
->spr
[SPR_TLBMISS
] & 0x3;
2117 (void)EPN
; /* avoid a compiler warning */
2118 LOG_SWTLB("%s: EPN " TARGET_FMT_lx
" " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
2119 " PTE1 " TARGET_FMT_lx
" way %d\n", __func__
, new_EPN
, EPN
, CMP
,
2121 /* Store this TLB */
2122 ppc6xx_tlb_store(env
, (uint32_t)(new_EPN
& TARGET_PAGE_MASK
),
2123 way
, is_code
, CMP
, RPN
);
2126 void helper_74xx_tlbd(CPUPPCState
*env
, target_ulong EPN
)
2128 do_74xx_tlb(env
, EPN
, 0);
2131 void helper_74xx_tlbi(CPUPPCState
*env
, target_ulong EPN
)
2133 do_74xx_tlb(env
, EPN
, 1);
2136 /*****************************************************************************/
2137 /* PowerPC 601 specific instructions (POWER bridge) */
2139 target_ulong
helper_rac(CPUPPCState
*env
, target_ulong addr
)
2143 target_ulong ret
= 0;
2146 * We don't have to generate many instances of this instruction,
2147 * as rac is supervisor only.
2149 * XXX: FIX THIS: Pretend we have no BAT
2151 nb_BATs
= env
->nb_BATs
;
2153 if (get_physical_address(env
, &ctx
, addr
, 0, ACCESS_INT
) == 0) {
2156 env
->nb_BATs
= nb_BATs
;
2160 static inline target_ulong
booke_tlb_to_page_size(int size
)
2162 return 1024 << (2 * size
);
2165 static inline int booke_page_size_to_tlb(target_ulong page_size
)
2169 switch (page_size
) {
2203 #if defined(TARGET_PPC64)
2204 case 0x000100000000ULL
:
2207 case 0x000400000000ULL
:
2210 case 0x001000000000ULL
:
2213 case 0x004000000000ULL
:
2216 case 0x010000000000ULL
:
2228 /* Helpers for 4xx TLB management */
2229 #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
2231 #define PPC4XX_TLBHI_V 0x00000040
2232 #define PPC4XX_TLBHI_E 0x00000020
2233 #define PPC4XX_TLBHI_SIZE_MIN 0
2234 #define PPC4XX_TLBHI_SIZE_MAX 7
2235 #define PPC4XX_TLBHI_SIZE_DEFAULT 1
2236 #define PPC4XX_TLBHI_SIZE_SHIFT 7
2237 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007
2239 #define PPC4XX_TLBLO_EX 0x00000200
2240 #define PPC4XX_TLBLO_WR 0x00000100
2241 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
2242 #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
2244 target_ulong
helper_4xx_tlbre_hi(CPUPPCState
*env
, target_ulong entry
)
2250 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2251 tlb
= &env
->tlb
.tlbe
[entry
];
2253 if (tlb
->prot
& PAGE_VALID
) {
2254 ret
|= PPC4XX_TLBHI_V
;
2256 size
= booke_page_size_to_tlb(tlb
->size
);
2257 if (size
< PPC4XX_TLBHI_SIZE_MIN
|| size
> PPC4XX_TLBHI_SIZE_MAX
) {
2258 size
= PPC4XX_TLBHI_SIZE_DEFAULT
;
2260 ret
|= size
<< PPC4XX_TLBHI_SIZE_SHIFT
;
2261 env
->spr
[SPR_40x_PID
] = tlb
->PID
;
2265 target_ulong
helper_4xx_tlbre_lo(CPUPPCState
*env
, target_ulong entry
)
2270 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2271 tlb
= &env
->tlb
.tlbe
[entry
];
2273 if (tlb
->prot
& PAGE_EXEC
) {
2274 ret
|= PPC4XX_TLBLO_EX
;
2276 if (tlb
->prot
& PAGE_WRITE
) {
2277 ret
|= PPC4XX_TLBLO_WR
;
2282 void helper_4xx_tlbwe_hi(CPUPPCState
*env
, target_ulong entry
,
2285 CPUState
*cs
= env_cpu(env
);
2287 target_ulong page
, end
;
2289 LOG_SWTLB("%s entry %d val " TARGET_FMT_lx
"\n", __func__
, (int)entry
,
2291 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2292 tlb
= &env
->tlb
.tlbe
[entry
];
2293 /* Invalidate previous TLB (if it's valid) */
2294 if (tlb
->prot
& PAGE_VALID
) {
2295 end
= tlb
->EPN
+ tlb
->size
;
2296 LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx
" end "
2297 TARGET_FMT_lx
"\n", __func__
, (int)entry
, tlb
->EPN
, end
);
2298 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
) {
2299 tlb_flush_page(cs
, page
);
2302 tlb
->size
= booke_tlb_to_page_size((val
>> PPC4XX_TLBHI_SIZE_SHIFT
)
2303 & PPC4XX_TLBHI_SIZE_MASK
);
2305 * We cannot handle TLB size < TARGET_PAGE_SIZE.
2306 * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
2308 if ((val
& PPC4XX_TLBHI_V
) && tlb
->size
< TARGET_PAGE_SIZE
) {
2309 cpu_abort(cs
, "TLB size " TARGET_FMT_lu
" < %u "
2310 "are not supported (%d)\n"
2311 "Please implement TARGET_PAGE_BITS_VARY\n",
2312 tlb
->size
, TARGET_PAGE_SIZE
, (int)((val
>> 7) & 0x7));
2314 tlb
->EPN
= val
& ~(tlb
->size
- 1);
2315 if (val
& PPC4XX_TLBHI_V
) {
2316 tlb
->prot
|= PAGE_VALID
;
2317 if (val
& PPC4XX_TLBHI_E
) {
2318 /* XXX: TO BE FIXED */
2320 "Little-endian TLB entries are not supported by now\n");
2323 tlb
->prot
&= ~PAGE_VALID
;
2325 tlb
->PID
= env
->spr
[SPR_40x_PID
]; /* PID */
2326 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx
" EPN " TARGET_FMT_lx
2327 " size " TARGET_FMT_lx
" prot %c%c%c%c PID %d\n", __func__
,
2328 (int)entry
, tlb
->RPN
, tlb
->EPN
, tlb
->size
,
2329 tlb
->prot
& PAGE_READ
? 'r' : '-',
2330 tlb
->prot
& PAGE_WRITE
? 'w' : '-',
2331 tlb
->prot
& PAGE_EXEC
? 'x' : '-',
2332 tlb
->prot
& PAGE_VALID
? 'v' : '-', (int)tlb
->PID
);
2333 /* Invalidate new TLB (if valid) */
2334 if (tlb
->prot
& PAGE_VALID
) {
2335 end
= tlb
->EPN
+ tlb
->size
;
2336 LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx
" end "
2337 TARGET_FMT_lx
"\n", __func__
, (int)entry
, tlb
->EPN
, end
);
2338 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
) {
2339 tlb_flush_page(cs
, page
);
2344 void helper_4xx_tlbwe_lo(CPUPPCState
*env
, target_ulong entry
,
2349 LOG_SWTLB("%s entry %i val " TARGET_FMT_lx
"\n", __func__
, (int)entry
,
2351 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2352 tlb
= &env
->tlb
.tlbe
[entry
];
2353 tlb
->attr
= val
& PPC4XX_TLBLO_ATTR_MASK
;
2354 tlb
->RPN
= val
& PPC4XX_TLBLO_RPN_MASK
;
2355 tlb
->prot
= PAGE_READ
;
2356 if (val
& PPC4XX_TLBLO_EX
) {
2357 tlb
->prot
|= PAGE_EXEC
;
2359 if (val
& PPC4XX_TLBLO_WR
) {
2360 tlb
->prot
|= PAGE_WRITE
;
2362 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx
" EPN " TARGET_FMT_lx
2363 " size " TARGET_FMT_lx
" prot %c%c%c%c PID %d\n", __func__
,
2364 (int)entry
, tlb
->RPN
, tlb
->EPN
, tlb
->size
,
2365 tlb
->prot
& PAGE_READ
? 'r' : '-',
2366 tlb
->prot
& PAGE_WRITE
? 'w' : '-',
2367 tlb
->prot
& PAGE_EXEC
? 'x' : '-',
2368 tlb
->prot
& PAGE_VALID
? 'v' : '-', (int)tlb
->PID
);
2371 target_ulong
helper_4xx_tlbsx(CPUPPCState
*env
, target_ulong address
)
2373 return ppcemb_tlb_search(env
, address
, env
->spr
[SPR_40x_PID
]);
2376 /* PowerPC 440 TLB management */
2377 void helper_440_tlbwe(CPUPPCState
*env
, uint32_t word
, target_ulong entry
,
2381 target_ulong EPN
, RPN
, size
;
2384 LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx
"\n",
2385 __func__
, word
, (int)entry
, value
);
2388 tlb
= &env
->tlb
.tlbe
[entry
];
2391 /* Just here to please gcc */
2393 EPN
= value
& 0xFFFFFC00;
2394 if ((tlb
->prot
& PAGE_VALID
) && EPN
!= tlb
->EPN
) {
2398 size
= booke_tlb_to_page_size((value
>> 4) & 0xF);
2399 if ((tlb
->prot
& PAGE_VALID
) && tlb
->size
< size
) {
2404 tlb
->attr
|= (value
>> 8) & 1;
2405 if (value
& 0x200) {
2406 tlb
->prot
|= PAGE_VALID
;
2408 if (tlb
->prot
& PAGE_VALID
) {
2409 tlb
->prot
&= ~PAGE_VALID
;
2413 tlb
->PID
= env
->spr
[SPR_440_MMUCR
] & 0x000000FF;
2414 if (do_flush_tlbs
) {
2415 tlb_flush(env_cpu(env
));
2419 RPN
= value
& 0xFFFFFC0F;
2420 if ((tlb
->prot
& PAGE_VALID
) && tlb
->RPN
!= RPN
) {
2421 tlb_flush(env_cpu(env
));
2426 tlb
->attr
= (tlb
->attr
& 0x1) | (value
& 0x0000FF00);
2427 tlb
->prot
= tlb
->prot
& PAGE_VALID
;
2429 tlb
->prot
|= PAGE_READ
<< 4;
2432 tlb
->prot
|= PAGE_WRITE
<< 4;
2435 tlb
->prot
|= PAGE_EXEC
<< 4;
2438 tlb
->prot
|= PAGE_READ
;
2441 tlb
->prot
|= PAGE_WRITE
;
2444 tlb
->prot
|= PAGE_EXEC
;
2450 target_ulong
helper_440_tlbre(CPUPPCState
*env
, uint32_t word
,
2458 tlb
= &env
->tlb
.tlbe
[entry
];
2461 /* Just here to please gcc */
2464 size
= booke_page_size_to_tlb(tlb
->size
);
2465 if (size
< 0 || size
> 0xF) {
2469 if (tlb
->attr
& 0x1) {
2472 if (tlb
->prot
& PAGE_VALID
) {
2475 env
->spr
[SPR_440_MMUCR
] &= ~0x000000FF;
2476 env
->spr
[SPR_440_MMUCR
] |= tlb
->PID
;
2482 ret
= tlb
->attr
& ~0x1;
2483 if (tlb
->prot
& (PAGE_READ
<< 4)) {
2486 if (tlb
->prot
& (PAGE_WRITE
<< 4)) {
2489 if (tlb
->prot
& (PAGE_EXEC
<< 4)) {
2492 if (tlb
->prot
& PAGE_READ
) {
2495 if (tlb
->prot
& PAGE_WRITE
) {
2498 if (tlb
->prot
& PAGE_EXEC
) {
2506 target_ulong
helper_440_tlbsx(CPUPPCState
*env
, target_ulong address
)
2508 return ppcemb_tlb_search(env
, address
, env
->spr
[SPR_440_MMUCR
] & 0xFF);
2511 /* PowerPC BookE 2.06 TLB management */
2513 static ppcmas_tlb_t
*booke206_cur_tlb(CPUPPCState
*env
)
2515 uint32_t tlbncfg
= 0;
2516 int esel
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_ESEL_MASK
) >> MAS0_ESEL_SHIFT
;
2517 int ea
= (env
->spr
[SPR_BOOKE_MAS2
] & MAS2_EPN_MASK
);
2520 tlb
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_TLBSEL_MASK
) >> MAS0_TLBSEL_SHIFT
;
2521 tlbncfg
= env
->spr
[SPR_BOOKE_TLB0CFG
+ tlb
];
2523 if ((tlbncfg
& TLBnCFG_HES
) && (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_HES
)) {
2524 cpu_abort(env_cpu(env
), "we don't support HES yet\n");
2527 return booke206_get_tlbm(env
, tlb
, ea
, esel
);
2530 void helper_booke_setpid(CPUPPCState
*env
, uint32_t pidn
, target_ulong pid
)
2532 env
->spr
[pidn
] = pid
;
2533 /* changing PIDs mean we're in a different address space now */
2534 tlb_flush(env_cpu(env
));
2537 void helper_booke_set_eplc(CPUPPCState
*env
, target_ulong val
)
2539 env
->spr
[SPR_BOOKE_EPLC
] = val
& EPID_MASK
;
2540 tlb_flush_by_mmuidx(env_cpu(env
), 1 << PPC_TLB_EPID_LOAD
);
2542 void helper_booke_set_epsc(CPUPPCState
*env
, target_ulong val
)
2544 env
->spr
[SPR_BOOKE_EPSC
] = val
& EPID_MASK
;
2545 tlb_flush_by_mmuidx(env_cpu(env
), 1 << PPC_TLB_EPID_STORE
);
2548 static inline void flush_page(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
)
2550 if (booke206_tlb_to_page_size(env
, tlb
) == TARGET_PAGE_SIZE
) {
2551 tlb_flush_page(env_cpu(env
), tlb
->mas2
& MAS2_EPN_MASK
);
2553 tlb_flush(env_cpu(env
));
2557 void helper_booke206_tlbwe(CPUPPCState
*env
)
2559 uint32_t tlbncfg
, tlbn
;
2561 uint32_t size_tlb
, size_ps
;
2565 switch (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_WQ_MASK
) {
2566 case MAS0_WQ_ALWAYS
:
2567 /* good to go, write that entry */
2570 /* XXX check if reserved */
2575 case MAS0_WQ_CLR_RSRV
:
2576 /* XXX clear entry */
2579 /* no idea what to do */
2583 if (((env
->spr
[SPR_BOOKE_MAS0
] & MAS0_ATSEL
) == MAS0_ATSEL_LRAT
) &&
2585 /* XXX we don't support direct LRAT setting yet */
2586 fprintf(stderr
, "cpu: don't support LRAT setting yet\n");
2590 tlbn
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_TLBSEL_MASK
) >> MAS0_TLBSEL_SHIFT
;
2591 tlbncfg
= env
->spr
[SPR_BOOKE_TLB0CFG
+ tlbn
];
2593 tlb
= booke206_cur_tlb(env
);
2596 raise_exception_err_ra(env
, POWERPC_EXCP_PROGRAM
,
2597 POWERPC_EXCP_INVAL
|
2598 POWERPC_EXCP_INVAL_INVAL
, GETPC());
2601 /* check that we support the targeted size */
2602 size_tlb
= (env
->spr
[SPR_BOOKE_MAS1
] & MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
2603 size_ps
= booke206_tlbnps(env
, tlbn
);
2604 if ((env
->spr
[SPR_BOOKE_MAS1
] & MAS1_VALID
) && (tlbncfg
& TLBnCFG_AVAIL
) &&
2605 !(size_ps
& (1 << size_tlb
))) {
2606 raise_exception_err_ra(env
, POWERPC_EXCP_PROGRAM
,
2607 POWERPC_EXCP_INVAL
|
2608 POWERPC_EXCP_INVAL_INVAL
, GETPC());
2612 cpu_abort(env_cpu(env
), "missing HV implementation\n");
2615 if (tlb
->mas1
& MAS1_VALID
) {
2617 * Invalidate the page in QEMU TLB if it was a valid entry.
2619 * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
2620 * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
2621 * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
2623 * "Note that when an L2 TLB entry is written, it may be displacing an
2624 * already valid entry in the same L2 TLB location (a victim). If a
2625 * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
2626 * TLB entry is automatically invalidated."
2628 flush_page(env
, tlb
);
2631 tlb
->mas7_3
= ((uint64_t)env
->spr
[SPR_BOOKE_MAS7
] << 32) |
2632 env
->spr
[SPR_BOOKE_MAS3
];
2633 tlb
->mas1
= env
->spr
[SPR_BOOKE_MAS1
];
2635 if ((env
->spr
[SPR_MMUCFG
] & MMUCFG_MAVN
) == MMUCFG_MAVN_V2
) {
2636 /* For TLB which has a fixed size TSIZE is ignored with MAV2 */
2637 booke206_fixed_size_tlbn(env
, tlbn
, tlb
);
2639 if (!(tlbncfg
& TLBnCFG_AVAIL
)) {
2640 /* force !AVAIL TLB entries to correct page size */
2641 tlb
->mas1
&= ~MAS1_TSIZE_MASK
;
2642 /* XXX can be configured in MMUCSR0 */
2643 tlb
->mas1
|= (tlbncfg
& TLBnCFG_MINSIZE
) >> 12;
2647 /* Make a mask from TLB size to discard invalid bits in EPN field */
2648 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
2649 /* Add a mask for page attributes */
2650 mask
|= MAS2_ACM
| MAS2_VLE
| MAS2_W
| MAS2_I
| MAS2_M
| MAS2_G
| MAS2_E
;
2654 * Executing a tlbwe instruction in 32-bit mode will set bits
2655 * 0:31 of the TLB EPN field to zero.
2660 tlb
->mas2
= env
->spr
[SPR_BOOKE_MAS2
] & mask
;
2662 if (!(tlbncfg
& TLBnCFG_IPROT
)) {
2663 /* no IPROT supported by TLB */
2664 tlb
->mas1
&= ~MAS1_IPROT
;
2667 flush_page(env
, tlb
);
2670 static inline void booke206_tlb_to_mas(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
)
2672 int tlbn
= booke206_tlbm_to_tlbn(env
, tlb
);
2673 int way
= booke206_tlbm_to_way(env
, tlb
);
2675 env
->spr
[SPR_BOOKE_MAS0
] = tlbn
<< MAS0_TLBSEL_SHIFT
;
2676 env
->spr
[SPR_BOOKE_MAS0
] |= way
<< MAS0_ESEL_SHIFT
;
2677 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
2679 env
->spr
[SPR_BOOKE_MAS1
] = tlb
->mas1
;
2680 env
->spr
[SPR_BOOKE_MAS2
] = tlb
->mas2
;
2681 env
->spr
[SPR_BOOKE_MAS3
] = tlb
->mas7_3
;
2682 env
->spr
[SPR_BOOKE_MAS7
] = tlb
->mas7_3
>> 32;
2685 void helper_booke206_tlbre(CPUPPCState
*env
)
2687 ppcmas_tlb_t
*tlb
= NULL
;
2689 tlb
= booke206_cur_tlb(env
);
2691 env
->spr
[SPR_BOOKE_MAS1
] = 0;
2693 booke206_tlb_to_mas(env
, tlb
);
/*
 * tlbsx: search all TLB arrays for an entry translating 'address',
 * using the search PID (SPID) and address space (SAS) from MAS6.
 * On a hit the matching entry is copied into the MAS registers; on a
 * miss the MAS registers are loaded with the defaults from MAS4 plus
 * next-victim information.
 */
void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    hwaddr raddr;
    uint32_t spid, sas;

    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);

            if (!tlb) {
                continue;
            }

            /* skip entries that do not match EA/PID */
            if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }

            /* the entry's TS bit must also match the searched space */
            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }

            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    /* propagate the searched PID (MAS6[SPID]) into MAS1[TID] */
    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
/*
 * Clear MAS1[V] on every entry of TLB array 'tlbn' that maps effective
 * address 'ea'.  IPROT-protected entries are left untouched.
 */
static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              vaddr ea)
{
    int i;
    int ways = booke206_tlb_ways(env, tlbn);
    hwaddr mask;

    for (i = 0; i < ways; i++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
        if (!tlb) {
            continue;
        }
        /* compare EPNs at the granularity of this entry's page size */
        mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
            !(tlb->mas1 & MAS1_IPROT)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}
/*
 * tlbivax: invalidate TLB entries by effective address.  EA bit 2 set
 * means "flush everything" in the selected array; EA bit 3 selects
 * TLB1 (set) versus TLB0 (clear).
 */
void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
    CPUState *cs;

    if (address & 0x4) {
        /* flush all entries */
        if (address & 0x8) {
            /* flush all of TLB1 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            /* flush all of TLB0 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    if (address & 0x8) {
        /* flush TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        CPU_FOREACH(cs) {
            tlb_flush(cs);
        }
    } else {
        /* flush TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        CPU_FOREACH(cs) {
            tlb_flush_page(cs, address & MAS2_EPN_MASK);
        }
    }
}
/* tlbilx 0: invalidate all non-protected TLB entries */
void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
{
    /* XXX missing LPID handling */
    booke206_flush_tlb(env, -1, 1);
}
/*
 * tlbilx 1: invalidate all TLB entries whose TID matches the search
 * PID taken from MAS6[SPID].
 */
void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
    int i, j;
    /*
     * MAS6[SPID] and MAS1[TID] occupy the same bit positions, so the
     * masked MAS6 value can be compared against MAS1 directly.
     */
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    ppcmas_tlb_t *tlb = env->tlb.tlbm;
    int tlb_size;

    /* XXX missing LPID handling */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            /* IPROT-protected entries survive the invalidation */
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        /* advance to the next TLB array in the flat tlbm storage */
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env_cpu(env));
}
/*
 * tlbilx 3: invalidate TLB entries matching the given effective
 * address together with the PID, guest-space (SGS) and indirect (SIND)
 * search criteria taken from MAS5/MAS6.
 */
void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode then */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            /* skip non-matching, protected, or wrong-kind entries */
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env_cpu(env));
}
/*
 * tlbflush: flush TLB0 and/or TLB1 according to the bits set in
 * 'type' (bit 1 -> TLB1, bit 2 -> TLB0).
 */
void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
{
    int flags = 0;

    if (type & 2) {
        flags |= BOOKE206_FLUSH_TLB1;
    }

    if (type & 4) {
        flags |= BOOKE206_FLUSH_TLB0;
    }

    booke206_flush_tlb(env, flags, 1);
}
/* Process a pending TLB flush for the current CPU only */
void helper_check_tlb_flush_local(CPUPPCState *env)
{
    check_tlb_flush(env, false);
}
/* Process a pending TLB flush, propagating it to all CPUs */
void helper_check_tlb_flush_global(CPUPPCState *env)
{
    check_tlb_flush(env, true);
}
2894 #endif /* CONFIG_TCG */
2896 /*****************************************************************************/
/*
 * Dispatch an address translation to the MMU model configured for this
 * CPU: radix or hash for Book3S v3, hash64 for older 64-bit MMUs,
 * hash32 for classic 32-bit MMUs, and the legacy "jumbo" path for
 * everything else.  Returns true on success with *raddrp, *psizep and
 * *protp filled in.
 */
static bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
                      hwaddr *raddrp, int *psizep, int *protp,
                      int mmu_idx, bool guest_visible)
{
    switch (cpu->env.mmu_model) {
#if defined(TARGET_PPC64)
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(cpu)) {
            return ppc_radix64_xlate(cpu, eaddr, access_type,
                                     raddrp, psizep, protp, mmu_idx,
                                     guest_visible);
        }
        /* fall through: hash translation for a v3 MMU in hash mode */
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        return ppc_hash64_xlate(cpu, eaddr, access_type,
                                raddrp, psizep, protp, mmu_idx,
                                guest_visible);
#endif

    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
        return ppc_hash32_xlate(cpu, eaddr, access_type,
                                raddrp, psizep, protp, mmu_idx,
                                guest_visible);

    default:
        return ppc_jumbo_xlate(cpu, eaddr, access_type, raddrp,
                               psizep, protp, mmu_idx, guest_visible);
    }
}
/*
 * Debug accessor: translate a guest virtual address to a physical page
 * address without raising guest-visible faults.  Returns -1 when no
 * translation exists.
 */
hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    hwaddr raddr;
    int s, p;

    /*
     * Some MMUs have separate TLBs for code and data. If we only
     * try an MMU_DATA_LOAD, we may not be able to read instructions
     * mapped by code TLBs, so we also try a MMU_INST_FETCH.
     */
    if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p,
                  cpu_mmu_index(&cpu->env, false), false) ||
        ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p,
                  cpu_mmu_index(&cpu->env, true), false)) {
        return raddr & TARGET_PAGE_MASK;
    }
    return -1;
}
2950 bool ppc_cpu_tlb_fill(CPUState
*cs
, vaddr eaddr
, int size
,
2951 MMUAccessType access_type
, int mmu_idx
,
2952 bool probe
, uintptr_t retaddr
)
2954 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
2956 int page_size
, prot
;
2958 if (ppc_xlate(cpu
, eaddr
, access_type
, &raddr
,
2959 &page_size
, &prot
, mmu_idx
, !probe
)) {
2960 tlb_set_page(cs
, eaddr
& TARGET_PAGE_MASK
, raddr
& TARGET_PAGE_MASK
,
2961 prot
, mmu_idx
, 1UL << page_size
);
2967 raise_exception_err_ra(&cpu
->env
, cs
->exception_index
,
2968 cpu
->env
.error_code
, retaddr
);