 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
//#define DEBUG_SOFTWARE_TLB
//#define DUMP_PAGE_TABLES
//#define DEBUG_SOFTWARE_TLB
//#define FLUSH_ALL_TLBS
#ifdef DEBUG_MMU
# define LOG_MMU(...) qemu_log(__VA_ARGS__)
# define LOG_MMU_STATE(env) log_cpu_state((env), 0)
#else
# define LOG_MMU(...) do { } while (0)
# define LOG_MMU_STATE(...) do { } while (0)
#endif
#ifdef DEBUG_SOFTWARE_TLB
# define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
# define LOG_SWTLB(...) do { } while (0)
#endif
#ifdef DEBUG_BATS
# define LOG_BATS(...) qemu_log(__VA_ARGS__)
#else
# define LOG_BATS(...) do { } while (0)
#endif
#ifdef DEBUG_SLB
# define LOG_SLB(...) qemu_log(__VA_ARGS__)
#else
# define LOG_SLB(...) do { } while (0)
#endif
/*****************************************************************************/
/* PowerPC MMU emulation */
#if defined(CONFIG_USER_ONLY)
int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw,
    int exception, error_code;
        exception = POWERPC_EXCP_ISI;
        error_code = 0x40000000;
        exception = POWERPC_EXCP_DSI;
        error_code = 0x40000000;
            error_code |= 0x02000000;
        env->spr[SPR_DAR] = address;
        env->spr[SPR_DSISR] = error_code;
    env->exception_index = exception;
    env->error_code = error_code;
/* Common routines used by software and hardware TLBs emulation */
static inline int pte_is_valid(target_ulong pte0)
    return pte0 & 0x80000000 ? 1 : 0;
static inline void pte_invalidate(target_ulong *pte0)
#if defined(TARGET_PPC64)
static inline int pte64_is_valid(target_ulong pte0)
    return pte0 & 0x0000000000000001ULL ? 1 : 0;
static inline void pte64_invalidate(target_ulong *pte0)
    *pte0 &= ~0x0000000000000001ULL;
#define PTE_PTEM_MASK 0x7FFFFFBF
#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
#if defined(TARGET_PPC64)
#define PTE64_PTEM_MASK 0xFFFFFFFFFFFFFF80ULL
#define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F)
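/* Explanatory note (added, not in the original source): for 32-bit hash PTEs,
 * word 0 holds V | VSID | H | API and word 1 holds RPN | R | C | WIMG | PP.
 * PTE_PTEM_MASK keeps the VSID/API match fields (dropping V and H), while
 * PTE_CHECK_MASK keeps RPN plus the WIMG/PP bits that must be identical for
 * duplicate matches; the 64-bit masks play the same role for the wider PTE
 * layout.
 */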
static inline int pp_check(int key, int pp, int nx)
    /* Compute access rights */
    /* When pp is 3/7, the result is undefined. Set it to noaccess */
            access |= PAGE_WRITE;
            access = PAGE_READ | PAGE_WRITE;
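    /* Explanatory note (added, not in the original source): the usual hash-MMU
     * PP encoding implemented here is, roughly: with key == 0, pp 0/1/2 grant
     * read/write and pp 3 is read-only; with key == 1, pp 0 grants no access,
     * pp 1/3 are read-only and pp 2 is read/write.  A non-zero nx suppresses
     * PAGE_EXEC in the returned rights.
     */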
static inline int check_prot(int prot, int rw, int access_type)
    if (access_type == ACCESS_CODE) {
        if (prot & PAGE_EXEC) {
        if (prot & PAGE_WRITE) {
        if (prot & PAGE_READ) {
static inline int pte_check(mmu_ctx_t *ctx, int is_64b, target_ulong pte0,
                            target_ulong pte1, int h, int rw, int type)
    target_ulong ptem, mmask;
    int access, ret, pteh, ptev, pp;

    /* Check validity and table match */
#if defined(TARGET_PPC64)
        ptev = pte64_is_valid(pte0);
        pteh = (pte0 >> 1) & 1;
        ptev = pte_is_valid(pte0);
        pteh = (pte0 >> 6) & 1;
    if (ptev && h == pteh) {
        /* Check vsid & api */
#if defined(TARGET_PPC64)
            ptem = pte0 & PTE64_PTEM_MASK;
            mmask = PTE64_CHECK_MASK;
            pp = (pte1 & 0x00000003) | ((pte1 >> 61) & 0x00000004);
            ctx->nx = (pte1 >> 2) & 1; /* No execute bit */
            ctx->nx |= (pte1 >> 3) & 1; /* Guarded bit */
            ptem = pte0 & PTE_PTEM_MASK;
            mmask = PTE_CHECK_MASK;
            pp = pte1 & 0x00000003;
        if (ptem == ctx->ptem) {
            if (ctx->raddr != (hwaddr)-1ULL) {
                /* all matches should have equal RPN, WIMG & PP */
                if ((ctx->raddr & mmask) != (pte1 & mmask)) {
                    qemu_log("Bad RPN/WIMG/PP\n");
            /* Compute access rights */
            access = pp_check(ctx->key, pp, ctx->nx);
            /* Keep the matching PTE information */
            ret = check_prot(ctx->prot, rw, type);
                LOG_MMU("PTE access granted !\n");
                /* Access right violation */
                LOG_MMU("PTE access rejected\n");
static inline int pte32_check(mmu_ctx_t *ctx, target_ulong pte0,
                              target_ulong pte1, int h, int rw, int type)
    return pte_check(ctx, 0, pte0, pte1, h, rw, type);
#if defined(TARGET_PPC64)
static inline int pte64_check(mmu_ctx_t *ctx, target_ulong pte0,
                              target_ulong pte1, int h, int rw, int type)
    return pte_check(ctx, 1, pte0, pte1, h, rw, type);
static inline int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
    /* Update page flags */
    if (!(*pte1p & 0x00000100)) {
        /* Update accessed flag */
        *pte1p |= 0x00000100;
    if (!(*pte1p & 0x00000080)) {
        if (rw == 1 && ret == 0) {
            /* Update changed flag */
            *pte1p |= 0x00000080;
            /* Force page fault for first write access */
            ctx->prot &= ~PAGE_WRITE;
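    /* Explanatory note (added, not in the original source): 0x00000100 in pte1
     * is the Referenced (R) bit and 0x00000080 the Changed (C) bit.  R is set
     * on any successful lookup, C only on a successful write; while C is still
     * clear, PAGE_WRITE is dropped from the mapping so the first real store
     * faults back into this path and C can be set then.
     */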
/* Software driven TLB helpers */
static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
                                    int way, int is_code)
    /* Select TLB num in a way from address */
    nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
    nr += env->tlb_per_way * way;
    /* 6xx have separate TLBs for instructions and data */
    if (is_code && env->id_tlbs == 1) {
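    /* Explanatory note (added, not in the original source): the entry index is
     * the page number modulo tlb_per_way, offset by the selected way; e.g.
     * with 64 entries per way, 4 KB pages and way 1, eaddr 0x00003000 picks
     * entry 3 + 64.  On cores with split I/D TLBs (id_tlbs == 1), code
     * lookups are additionally offset past the data entries.
     */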
static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
    /* LOG_SWTLB("Invalidate all TLBs\n"); */
    /* Invalidate all defined software TLB */
    if (env->id_tlbs == 1) {
    for (nr = 0; nr < max; nr++) {
        tlb = &env->tlb.tlb6[nr];
        pte_invalidate(&tlb->pte0);
static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
                                               int is_code, int match_epn)
#if !defined(FLUSH_ALL_TLBS)
    /* Invalidate ITLB + DTLB, all ways */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
        tlb = &env->tlb.tlb6[nr];
        if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
            LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
            pte_invalidate(&tlb->pte0);
            tlb_flush_page(env, tlb->EPN);
    /* XXX: the PowerPC specification says this is valid as well */
    ppc6xx_tlb_invalidate_all(env);
static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
                                              target_ulong eaddr, int is_code)
    ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
                             int is_code, target_ulong pte0, target_ulong pte1)
    nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
    tlb = &env->tlb.tlb6[nr];
    LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1);
    /* Invalidate any pending reference in QEMU for this virtual address */
    ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
    /* Store last way for LRU mechanism */
static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
                                   target_ulong eaddr, int rw, int access_type)
    ret = -1; /* No TLB found */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way,
                               access_type == ACCESS_CODE ? 1 : 0);
        tlb = &env->tlb.tlb6[nr];
        /* This test "emulates" the PTE index match for hardware TLBs */
        if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
            LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
                      "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
                      pte_is_valid(tlb->pte0) ? "valid" : "inval",
                      tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
        LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
                  TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
                  pte_is_valid(tlb->pte0) ? "valid" : "inval",
                  tlb->EPN, eaddr, tlb->pte1,
                  rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
        switch (pte32_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) {
            /* TLB inconsistency */
            /* Access violation */
            /* XXX: we should keep looping to check that all TLBs are
             *      consistent, but we can stop early because the result
             *      would be undefined anyway if the TLBs were inconsistent. */
    LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
              ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
    /* Update page flags */
    pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, rw);
/* Perform BAT hit & translation */
static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
                                 int *validp, int *protp, target_ulong *BATu,
    bl = (*BATu & 0x00001FFC) << 15;
    if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
        ((msr_pr != 0) && (*BATu & 0x00000001))) {
        pp = *BATl & 0x00000003;
            prot = PAGE_READ | PAGE_EXEC;
static inline void bat_601_size_prot(CPUPPCState *env, target_ulong *blp,
                                     int *validp, int *protp,
                                     target_ulong *BATu, target_ulong *BATl)
    int key, pp, valid, prot;

    bl = (*BATl & 0x0000003F) << 17;
    LOG_BATS("b %02x ==> bl " TARGET_FMT_lx " msk " TARGET_FMT_lx "\n",
             (uint8_t)(*BATl & 0x0000003F), bl, ~bl);
    valid = (*BATl >> 6) & 1;
        pp = *BATu & 0x00000003;
        key = (*BATu >> 3) & 1;
        key = (*BATu >> 2) & 1;
    prot = pp_check(key, pp, 0);
static inline int get_bat(CPUPPCState *env, mmu_ctx_t *ctx,
                          target_ulong virtual, int rw, int type)
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;

    LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
             type == ACCESS_CODE ? 'I' : 'D', virtual);
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
    for (i = 0; i < env->nb_BATs; i++) {
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
            bat_601_size_prot(env, &bl, &valid, &prot, BATu, BATl);
            bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
        LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                 " BATl " TARGET_FMT_lx "\n", __func__,
                 type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl);
        if ((virtual & 0xF0000000) == BEPIu &&
            ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
            /* Get physical address */
            ctx->raddr = (*BATl & 0xF0000000) |
                ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
                (virtual & 0x0001F000);
            /* Compute access rights */
            ret = check_prot(ctx->prot, rw, type);
            LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
                     i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
                     ctx->prot & PAGE_WRITE ? 'W' : '-');
#if defined(DEBUG_BATS)
    if (qemu_log_enabled()) {
        LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual);
        for (i = 0; i < 4; i++) {
            BEPIu = *BATu & 0xF0000000;
            BEPIl = *BATu & 0x0FFE0000;
            bl = (*BATu & 0x00001FFC) << 15;
            LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                     " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                     TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                     __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual,
                     *BATu, *BATl, BEPIu, BEPIl, bl);
static inline hwaddr get_pteg_offset(CPUPPCState *env,
    return (hash * pte_size * 8) & env->htab_mask;
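    /* Explanatory note (added, not in the original source): a PTEG holds 8
     * PTEs, so group 'hash' starts at byte offset hash * 8 * pte_size;
     * masking with htab_mask wraps that offset into the configured hash
     * table size.
     */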
/* PTE table lookup */
static inline int find_pte2(CPUPPCState *env, mmu_ctx_t *ctx, int is_64b, int h,
                            int rw, int type, int target_page_bits)
    target_ulong pte0, pte1;

    ret = -1; /* No entry found */
    pteg_off = get_pteg_offset(env, ctx->hash[h],
                               is_64b ? HASH_PTE_SIZE_64 : HASH_PTE_SIZE_32);
    for (i = 0; i < 8; i++) {
#if defined(TARGET_PPC64)
            if (env->external_htab) {
                pte0 = ldq_p(env->external_htab + pteg_off + (i * 16));
                pte1 = ldq_p(env->external_htab + pteg_off + (i * 16) + 8);
                pte0 = ldq_phys(env->htab_base + pteg_off + (i * 16));
                pte1 = ldq_phys(env->htab_base + pteg_off + (i * 16) + 8);
            r = pte64_check(ctx, pte0, pte1, h, rw, type);
            LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " "
                    TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
                    pteg_off + (i * 16), pte0, pte1, (int)(pte0 & 1), h,
                    (int)((pte0 >> 1) & 1), ctx->ptem);
            if (env->external_htab) {
                pte0 = ldl_p(env->external_htab + pteg_off + (i * 8));
                pte1 = ldl_p(env->external_htab + pteg_off + (i * 8) + 4);
                pte0 = ldl_phys(env->htab_base + pteg_off + (i * 8));
                pte1 = ldl_phys(env->htab_base + pteg_off + (i * 8) + 4);
            r = pte32_check(ctx, pte0, pte1, h, rw, type);
            LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " "
                    TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
                    pteg_off + (i * 8), pte0, pte1, (int)(pte0 >> 31), h,
                    (int)((pte0 >> 6) & 1), ctx->ptem);
            /* PTE inconsistency */
            /* Access violation */
            /* XXX: we should keep looping to check that all PTEs are
             *      consistent, but we can stop early because the result
             *      would be undefined anyway if the PTEs were inconsistent. */
    LOG_MMU("found PTE at addr " TARGET_FMT_lx " prot=%01x ret=%d\n",
            ctx->raddr, ctx->prot, ret);
    /* Update page flags */
    if (pte_update_flags(ctx, &pte1, ret, rw) == 1) {
#if defined(TARGET_PPC64)
        if (env->external_htab) {
            stq_p(env->external_htab + pteg_off + (good * 16) + 8,
            stq_phys_notdirty(env->htab_base + pteg_off +
                              (good * 16) + 8, pte1);
        if (env->external_htab) {
            stl_p(env->external_htab + pteg_off + (good * 8) + 4,
            stl_phys_notdirty(env->htab_base + pteg_off +
                              (good * 8) + 4, pte1);
    /* We have a TLB that saves 4K pages, so let's
     * split a huge page into 4K chunks */
    if (target_page_bits != TARGET_PAGE_BITS) {
        ctx->raddr |= (ctx->eaddr & ((1 << target_page_bits) - 1))
static inline int find_pte(CPUPPCState *env, mmu_ctx_t *ctx, int h, int rw,
                           int type, int target_page_bits)
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        return find_pte2(env, ctx, 1, h, rw, type, target_page_bits);
    return find_pte2(env, ctx, 0, h, rw, type, target_page_bits);
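    /* Explanatory note (added, not in the original source): h selects the
     * hash-table probe: 0 uses the primary hash in ctx->hash[0], 1 uses the
     * secondary hash in ctx->hash[1] (the one's complement of the primary),
     * and pte_check() only accepts a PTE whose H bit matches h.
     */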
#if defined(TARGET_PPC64)
static inline ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
    uint64_t esid_256M, esid_1T;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
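            /* Explanatory note (added, not in the original source): ORing
             * SLB_ESID_V into the masked effective address above lets this
             * single compare check both "ESID matches" and "entry is valid",
             * because slb->esid keeps its V bit in the same position.
             */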
/*****************************************************************************/
void helper_slbia(CPUPPCState *env)
    int n, do_invalidate;

    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /* XXX: given that segment sizes are 256 MB or 1 TB,
             *      and we still don't have a tlb_flush_mask(env, n, mask)
             *      in QEMU, we just invalidate all TLBs */
void helper_slbie(CPUPPCState *env, target_ulong addr)
    slb = slb_lookup(env, addr);
    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;
        /* XXX: given that segment sizes are 256 MB or 1 TB,
         *      and we still don't have a tlb_flush_mask(env, n, mask)
         *      in QEMU, we just invalidate all TLBs */
int ppc_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (rb & (0x1000 - env->slb_nr)) {
        return -1; /* Reserved bits set or slot too high */
    if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */

    /* Mask out the slot number as we store the entry */
    slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);

    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, slot, rb, rs,
            slb->esid, slb->vsid);
static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb,
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb,
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
#endif /* defined(TARGET_PPC64) */
/* Perform segment based translation */
static inline int get_segment(CPUPPCState *env, mmu_ctx_t *ctx,
                              target_ulong eaddr, int rw, int type)
    int ds, pr, target_page_bits;

#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        target_ulong pageaddr;

        LOG_MMU("Check SLBs\n");
        slb = slb_lookup(env, eaddr);
        if (slb->vsid & SLB_VSID_B) {
            vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
            vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        target_page_bits = (slb->vsid & SLB_VSID_L)
            ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
        ctx->key = !!(pr ? (slb->vsid & SLB_VSID_KP)
                      : (slb->vsid & SLB_VSID_KS));
        ctx->nx = !!(slb->vsid & SLB_VSID_N);

        pageaddr = eaddr & ((1ULL << segment_bits)
                            - (1ULL << target_page_bits));
        if (slb->vsid & SLB_VSID_B) {
            hash = vsid ^ (vsid << 25) ^ (pageaddr >> target_page_bits);
            hash = vsid ^ (pageaddr >> target_page_bits);
        /* Only 5 bits of the page index are used in the AVPN */
        ctx->ptem = (slb->vsid & SLB_VSID_PTEM) |
            ((pageaddr >> 16) & ((1ULL << segment_bits) - 0x80));
#endif /* defined(TARGET_PPC64) */
        target_ulong sr, pgidx;

        sr = env->sr[eaddr >> 28];
        ctx->key = (((sr & 0x20000000) && (pr != 0)) ||
                    ((sr & 0x40000000) && (pr == 0))) ? 1 : 0;
        ds = sr & 0x80000000 ? 1 : 0;
        ctx->nx = sr & 0x10000000 ? 1 : 0;
        vsid = sr & 0x00FFFFFF;
        target_page_bits = TARGET_PAGE_BITS;
        LOG_MMU("Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx " nip="
                TARGET_FMT_lx " lr=" TARGET_FMT_lx
                " ir=%d dr=%d pr=%d %d t=%d\n",
                eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
                (int)msr_dr, pr != 0 ? 1 : 0, rw, type);
        pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
        ctx->ptem = (vsid << 7) | (pgidx >> 10);
    LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
            ctx->key, ds, ctx->nx, vsid);

    /* Check if instruction fetch is allowed, if needed */
    if (type != ACCESS_CODE || ctx->nx == 0) {
        /* Page address translation */
        LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
                " hash " TARGET_FMT_plx "\n",
                env->htab_base, env->htab_mask, hash);
        ctx->hash[1] = ~hash;

        /* Initialize real address with an invalid value */
        ctx->raddr = (hwaddr)-1ULL;
        if (unlikely(env->mmu_model == POWERPC_MMU_SOFT_6xx ||
                     env->mmu_model == POWERPC_MMU_SOFT_74xx)) {
            /* Software TLB search */
            ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type);
            LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                    " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                    " hash=" TARGET_FMT_plx "\n",
                    env->htab_base, env->htab_mask, vsid, ctx->ptem,
            /* Primary table lookup */
            ret = find_pte(env, ctx, 0, rw, type, target_page_bits);
                /* Secondary table lookup */
                if (eaddr != 0xEFFFFFFF) {
                    LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                            " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                            " hash=" TARGET_FMT_plx "\n", env->htab_base,
                            env->htab_mask, vsid, ctx->ptem, ctx->hash[1]);
                ret2 = find_pte(env, ctx, 1, rw, type,
#if defined(DUMP_PAGE_TABLES)
        if (qemu_log_enabled()) {
            uint32_t a0, a1, a2, a3;

            qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
                     "\n", sdr, mask + 0x80);
            for (curaddr = sdr; curaddr < (sdr + mask + 0x80);
                a0 = ldl_phys(curaddr);
                a1 = ldl_phys(curaddr + 4);
                a2 = ldl_phys(curaddr + 8);
                a3 = ldl_phys(curaddr + 12);
                if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
                    qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
                             curaddr, a0, a1, a2, a3);
948 LOG_MMU("No access allowed\n");
954 LOG_MMU("direct store...\n");
955 /* Direct-store segment : absolutely *BUGGY* for now */
957 /* Direct-store implies a 32-bit MMU.
958 * Check the Segment Register's bus unit ID (BUID).
960 sr
= env
->sr
[eaddr
>> 28];
961 if ((sr
& 0x1FF00000) >> 20 == 0x07f) {
962 /* Memory-forced I/O controller interface access */
963 /* If T=1 and BUID=x'07F', the 601 performs a memory access
964 * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
966 ctx
->raddr
= ((sr
& 0xF) << 28) | (eaddr
& 0x0FFFFFFF);
967 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
973 /* Integer load/store : only access allowed */
976 /* No code fetch is allowed in direct-store areas */
979 /* Floating point load/store */
982 /* lwarx, ldarx or srwcx. */
985 /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
986 /* Should make the instruction do no-op.
987 * As it already do no-op, it's quite easy :-)
995 qemu_log("ERROR: instruction should not need "
996 "address translation\n");
999 if ((rw
== 1 || ctx
->key
!= 1) && (rw
== 0 || ctx
->key
!= 0)) {
/* Generic TLB check function for embedded PowerPC implementations */
static int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
                            target_ulong address, uint32_t pid, int ext,
    /* Check valid flag */
    if (!(tlb->prot & PAGE_VALID)) {
    mask = ~(tlb->size - 1);
    LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx
              " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN,
              mask, (uint32_t)tlb->PID, tlb->prot);
    if (tlb->PID != 0 && tlb->PID != pid) {
    /* Check effective address */
    if ((address & mask) != tlb->EPN) {
    *raddrp = (tlb->RPN & mask) | (address & ~mask);
        /* Extend the physical address to 36 bits */
        *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32;
/* Generic TLB search function for PowerPC embedded implementations */
static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address,
    /* Default return value is no match */
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
/* Helpers specific to PowerPC 40x implementations */
static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        tlb->prot &= ~PAGE_VALID;
static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState *env,
                                              target_ulong eaddr, uint32_t pid)
#if !defined(FLUSH_ALL_TLBS)
    target_ulong page, end;

    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, eaddr, pid, 0, i) == 0) {
            end = tlb->EPN + tlb->size;
            for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
                tlb_flush_page(env, page);
            tlb->prot &= ~PAGE_VALID;
    ppc4xx_tlb_invalidate_all(env);
static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                       target_ulong address, int rw,
    int i, ret, zsel, zpr, pr;

    raddr = (hwaddr)-1ULL;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, address,
                             env->spr[SPR_40x_PID], 0, i) < 0) {
        zsel = (tlb->attr >> 4) & 0xF;
        zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
        LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
                  __func__, i, zsel, zpr, rw, tlb->attr);
        /* Check execute enable bit */
            /* All accesses granted */
            ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
                /* Raise Zone protection fault. */
                env->spr[SPR_40x_ESR] = 1 << 22;
            /* Check from TLB entry */
            ctx->prot = tlb->prot;
            ret = check_prot(ctx->prot, rw, access_type);
                env->spr[SPR_40x_ESR] = 0;
        LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
    LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
              " %d %d\n", __func__, address, raddr, ctx->prot, ret);
void store_40x_sler(CPUPPCState *env, uint32_t val)
    /* XXX: TO BE FIXED */
    if (val != 0x00000000) {
        cpu_abort(env, "Little-endian regions are not supported for now\n");
    env->spr[SPR_405_SLER] = val;
static inline int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
                                     hwaddr *raddr, int *prot,
                                     target_ulong address, int rw,
                                     int access_type, int i)
    if (ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID],
                         !env->nb_pids, i) >= 0) {
    if (env->spr[SPR_BOOKE_PID1] &&
        ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID1], 0, i) >= 0) {
    if (env->spr[SPR_BOOKE_PID2] &&
        ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID2], 0, i) >= 0) {
    LOG_SWTLB("%s: TLB entry not found\n", __func__);
        prot2 = tlb->prot & 0xF;
        prot2 = (tlb->prot >> 4) & 0xF;
    /* Check the address space */
    if (access_type == ACCESS_CODE) {
        if (msr_ir != (tlb->attr & 1)) {
            LOG_SWTLB("%s: AS doesn't match\n", __func__);
        if (prot2 & PAGE_EXEC) {
            LOG_SWTLB("%s: good TLB!\n", __func__);
        LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2);
        if (msr_dr != (tlb->attr & 1)) {
            LOG_SWTLB("%s: AS doesn't match\n", __func__);
        if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) {
            LOG_SWTLB("%s: found TLB!\n", __func__);
        LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2);
static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                         target_ulong address, int rw,
    raddr = (hwaddr)-1ULL;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw,
        LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
        LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, raddr, ctx->prot, ret);
static void booke206_flush_tlb(CPUPPCState *env, int flags,
                               const int check_iprot)
    ppcmas_tlb_t *tlb = env->tlb.tlbm;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        if (flags & (1 << i)) {
            tlb_size = booke206_tlb_size(env, i);
            for (j = 0; j < tlb_size; j++) {
                if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
                    tlb[j].mas1 &= ~MAS1_VALID;
        tlb += booke206_tlb_size(env, i);
static hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
    tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;

    return 1024ULL << tlbm_size;
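    /* Explanatory note (added, not in the original source): TSIZE is a
     * power-of-two exponent on a 1 KB base, so TSIZE 1 is 2 KB and TSIZE 10
     * is 1 MB, matching the book3e_tsize_to_str[] table below.
     */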
/* TLB check function for MAS based SoftTLBs */
int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
                     target_ulong address, uint32_t pid)
    /* Check valid flag */
    if (!(tlb->mas1 & MAS1_VALID)) {
    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%"
              PRIx64 " mask=0x" TARGET_FMT_lx " MAS7_3=0x%" PRIx64 " MAS8=%x\n",
              __func__, address, pid, tlb->mas1, tlb->mas2, mask, tlb->mas7_3,
    tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
    if (tlb_pid != 0 && tlb_pid != pid) {
    /* Check effective address */
    if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
    *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);
static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
                                 hwaddr *raddr, int *prot,
                                 target_ulong address, int rw,
    if (ppcmas_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID]) >= 0) {
    if (env->spr[SPR_BOOKE_PID1] &&
        ppcmas_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID1]) >= 0) {
    if (env->spr[SPR_BOOKE_PID2] &&
        ppcmas_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID2]) >= 0) {
    LOG_SWTLB("%s: TLB entry not found\n", __func__);
    if (tlb->mas7_3 & MAS3_UR) {
    if (tlb->mas7_3 & MAS3_UW) {
        prot2 |= PAGE_WRITE;
    if (tlb->mas7_3 & MAS3_UX) {
    if (tlb->mas7_3 & MAS3_SR) {
    if (tlb->mas7_3 & MAS3_SW) {
        prot2 |= PAGE_WRITE;
    if (tlb->mas7_3 & MAS3_SX) {
    /* Check the address space and permissions */
    if (access_type == ACCESS_CODE) {
        if (msr_ir != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
            LOG_SWTLB("%s: AS doesn't match\n", __func__);
        if (prot2 & PAGE_EXEC) {
            LOG_SWTLB("%s: good TLB!\n", __func__);
        LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2);
        if (msr_dr != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
            LOG_SWTLB("%s: AS doesn't match\n", __func__);
        if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) {
            LOG_SWTLB("%s: found TLB!\n", __func__);
        LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2);
static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                            target_ulong address, int rw,
    raddr = (hwaddr)-1ULL;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
        LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
        LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, raddr, ctx->prot, ret);
static const char *book3e_tsize_to_str[32] = {
    "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
    "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
    "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
static void mmubooke_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
    ppcemb_tlb_t *entry;

    if (kvm_enabled() && !env->kvm_sw_tlb) {
        cpu_fprintf(f, "Cannot access KVM TLB\n");

    cpu_fprintf(f, "\nTLB:\n");
    cpu_fprintf(f, "Effective Physical Size PID Prot "
    entry = &env->tlb.tlbe[0];
    for (i = 0; i < env->nb_tlb; i++, entry++) {
        uint64_t size = (uint64_t)entry->size;

        /* Check valid flag */
        if (!(entry->prot & PAGE_VALID)) {
        mask = ~(entry->size - 1);
        ea = entry->EPN & mask;
        pa = entry->RPN & mask;
        /* Extend the physical address to 36 bits */
        pa |= (hwaddr)(entry->RPN & 0xF) << 32;
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / 1024);
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size);
        cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
                    (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
                    entry->prot, entry->attr);
static void mmubooke206_dump_one_tlb(FILE *f, fprintf_function cpu_fprintf,
                                     CPUPPCState *env, int tlbn, int offset,
    ppcmas_tlb_t *entry;

    cpu_fprintf(f, "\nTLB%d:\n", tlbn);
    cpu_fprintf(f, "Effective Physical Size TID TS SRWX"
                " URWX WIMGE U0123\n");

    entry = &env->tlb.tlbm[offset];
    for (i = 0; i < tlbsize; i++, entry++) {
        hwaddr ea, pa, size;

        if (!(entry->mas1 & MAS1_VALID)) {
        tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
        size = 1024ULL << tsize;
        ea = entry->mas2 & ~(size - 1);
        pa = entry->mas7_3 & ~(size - 1);

        cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
                    "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
                    (uint64_t)ea, (uint64_t)pa,
                    book3e_tsize_to_str[tsize],
                    (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
                    (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
                    entry->mas7_3 & MAS3_SR ? 'R' : '-',
                    entry->mas7_3 & MAS3_SW ? 'W' : '-',
                    entry->mas7_3 & MAS3_SX ? 'X' : '-',
                    entry->mas7_3 & MAS3_UR ? 'R' : '-',
                    entry->mas7_3 & MAS3_UW ? 'W' : '-',
                    entry->mas7_3 & MAS3_UX ? 'X' : '-',
                    entry->mas2 & MAS2_W ? 'W' : '-',
                    entry->mas2 & MAS2_I ? 'I' : '-',
                    entry->mas2 & MAS2_M ? 'M' : '-',
                    entry->mas2 & MAS2_G ? 'G' : '-',
                    entry->mas2 & MAS2_E ? 'E' : '-',
                    entry->mas7_3 & MAS3_U0 ? '0' : '-',
                    entry->mas7_3 & MAS3_U1 ? '1' : '-',
                    entry->mas7_3 & MAS3_U2 ? '2' : '-',
                    entry->mas7_3 & MAS3_U3 ? '3' : '-');
static void mmubooke206_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
    if (kvm_enabled() && !env->kvm_sw_tlb) {
        cpu_fprintf(f, "Cannot access KVM TLB\n");

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int size = booke206_tlb_size(env, i);
        mmubooke206_dump_one_tlb(f, cpu_fprintf, env, i, offset, size);
#if defined(TARGET_PPC64)
static void mmubooks_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
    uint64_t slbe, slbv;

    cpu_synchronize_state(env);

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < env->slb_nr; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
    switch (env->mmu_model) {
    case POWERPC_MMU_BOOKE:
        mmubooke_dump_mmu(f, cpu_fprintf, env);
    case POWERPC_MMU_BOOKE206:
        mmubooke206_dump_mmu(f, cpu_fprintf, env);
#if defined(TARGET_PPC64)
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_06d:
        mmubooks_dump_mmu(f, cpu_fprintf, env);
        qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx,
                                 target_ulong eaddr, int rw)
    ctx->prot = PAGE_READ | PAGE_EXEC;

    switch (env->mmu_model) {
    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_REAL:
    case POWERPC_MMU_BOOKE:
        ctx->prot |= PAGE_WRITE;
#if defined(TARGET_PPC64)
    case POWERPC_MMU_620:
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_06d:
        /* Real addresses are 60 bits long */
        ctx->raddr &= 0x0FFFFFFFFFFFFFFFULL;
        ctx->prot |= PAGE_WRITE;
    case POWERPC_MMU_SOFT_4xx_Z:
        if (unlikely(msr_pe != 0)) {
            /* The 403 family adds some particular protections,
             * using PBL/PBU registers for accesses with no translation. */
                /* Check PLB validity */
                (env->pb[0] < env->pb[1] &&
                 /* and address in plb area */
                 eaddr >= env->pb[0] && eaddr < env->pb[1]) ||
                (env->pb[2] < env->pb[3] &&
                 eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
            if (in_plb ^ msr_px) {
                /* Access in protected area */
                    /* Access is not allowed */
                /* Read-write access is allowed */
                ctx->prot |= PAGE_WRITE;
    case POWERPC_MMU_MPC8xx:
        cpu_abort(env, "MPC8xx MMU model is not implemented\n");
    case POWERPC_MMU_BOOKE206:
        cpu_abort(env, "BookE 2.06 MMU doesn't have physical real mode\n");
        cpu_abort(env, "Unknown or invalid MMU model\n");
static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                target_ulong eaddr, int rw, int access_type)
    qemu_log("%s\n", __func__);

    if ((access_type == ACCESS_CODE && msr_ir == 0) ||
        (access_type != ACCESS_CODE && msr_dr == 0)) {
        if (env->mmu_model == POWERPC_MMU_BOOKE) {
            /* The BookE MMU always performs address translation. The
               IS and DS bits only affect the address space. */
            ret = mmubooke_get_physical_address(env, ctx, eaddr,
        } else if (env->mmu_model == POWERPC_MMU_BOOKE206) {
            ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw,
            /* No address translation. */
            ret = check_physical(env, ctx, eaddr, rw);
        switch (env->mmu_model) {
        case POWERPC_MMU_32B:
        case POWERPC_MMU_601:
        case POWERPC_MMU_SOFT_6xx:
        case POWERPC_MMU_SOFT_74xx:
            /* Try to find a BAT */
            if (env->nb_BATs != 0) {
                ret = get_bat(env, ctx, eaddr, rw, access_type);
#if defined(TARGET_PPC64)
        case POWERPC_MMU_620:
        case POWERPC_MMU_64B:
        case POWERPC_MMU_2_06:
        case POWERPC_MMU_2_06d:
                /* We didn't match any BAT entry or don't have BATs */
                ret = get_segment(env, ctx, eaddr, rw, access_type);
        case POWERPC_MMU_SOFT_4xx:
        case POWERPC_MMU_SOFT_4xx_Z:
            ret = mmu40x_get_physical_address(env, ctx, eaddr,
        case POWERPC_MMU_BOOKE:
            ret = mmubooke_get_physical_address(env, ctx, eaddr,
        case POWERPC_MMU_BOOKE206:
            ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw,
        case POWERPC_MMU_MPC8xx:
            cpu_abort(env, "MPC8xx MMU model is not implemented\n");
        case POWERPC_MMU_REAL:
            cpu_abort(env, "PowerPC in real mode does not do any translation\n");
            cpu_abort(env, "Unknown or invalid MMU model\n");
    qemu_log("%s address " TARGET_FMT_lx " => %d " TARGET_FMT_plx "\n",
             __func__, eaddr, ret, ctx->raddr);
hwaddr cpu_get_phys_page_debug(CPUPPCState *env, target_ulong addr)
    if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) {
    return ctx.raddr & TARGET_PAGE_MASK;
static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS6] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (((rw == 2) && msr_ir) || ((rw != 2) && msr_dr)) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
        env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;

    env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
    env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;

    switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
    case MAS4_TIDSELD_PID0:
        env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID] << MAS1_TID_SHIFT;
    case MAS4_TIDSELD_PID1:
        env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID1] << MAS1_TID_SHIFT;
    case MAS4_TIDSELD_PID2:
        env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID2] << MAS1_TID_SHIFT;

    env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
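    /* Explanatory note (added, not in the original source): on a miss this
     * pre-loads the MAS registers with the MAS4 defaults (TLB select, page
     * size, WIMGE), the faulting EPN, the search PID and address-space bit,
     * plus a round-robin next-victim ESEL/NV hint, so the guest handler only
     * has to supply the RPN and permissions before its tlbwe.
     */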
/* Perform address translation */
int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw,
        access_type = ACCESS_CODE;
        access_type = env->access_type;
    ret = get_physical_address(env, &ctx, address, rw, access_type);
        tlb_set_page(env, address & TARGET_PAGE_MASK,
                     ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
                     mmu_idx, TARGET_PAGE_SIZE);
    } else if (ret < 0) {
        if (access_type == ACCESS_CODE) {
            /* No matches in page tables or TLB */
            switch (env->mmu_model) {
            case POWERPC_MMU_SOFT_6xx:
                env->exception_index = POWERPC_EXCP_IFTLB;
                env->error_code = 1 << 18;
                env->spr[SPR_IMISS] = address;
                env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
            case POWERPC_MMU_SOFT_74xx:
                env->exception_index = POWERPC_EXCP_IFTLB;
            case POWERPC_MMU_SOFT_4xx:
            case POWERPC_MMU_SOFT_4xx_Z:
                env->exception_index = POWERPC_EXCP_ITLB;
                env->error_code = 0;
                env->spr[SPR_40x_DEAR] = address;
                env->spr[SPR_40x_ESR] = 0x00000000;
            case POWERPC_MMU_32B:
            case POWERPC_MMU_601:
#if defined(TARGET_PPC64)
            case POWERPC_MMU_620:
            case POWERPC_MMU_64B:
            case POWERPC_MMU_2_06:
            case POWERPC_MMU_2_06d:
                env->exception_index = POWERPC_EXCP_ISI;
                env->error_code = 0x40000000;
            case POWERPC_MMU_BOOKE206:
                booke206_update_mas_tlb_miss(env, address, rw);
            case POWERPC_MMU_BOOKE:
                env->exception_index = POWERPC_EXCP_ITLB;
                env->error_code = 0;
                env->spr[SPR_BOOKE_DEAR] = address;
            case POWERPC_MMU_MPC8xx:
                cpu_abort(env, "MPC8xx MMU model is not implemented\n");
            case POWERPC_MMU_REAL:
                cpu_abort(env, "PowerPC in real mode should never raise "
                          "any MMU exceptions\n");
                cpu_abort(env, "Unknown or invalid MMU model\n");
            /* Access rights violation */
            env->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
            /* No execute protection violation */
            if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                env->spr[SPR_BOOKE_ESR] = 0x00000000;
            env->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x10000000;
            /* Direct store exception */
            /* No code fetch is allowed in direct-store areas */
            env->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x10000000;
#if defined(TARGET_PPC64)
            /* No match in segment table */
            if (env->mmu_model == POWERPC_MMU_620) {
                env->exception_index = POWERPC_EXCP_ISI;
                /* XXX: this might be incorrect */
                env->error_code = 0x40000000;
                env->exception_index = POWERPC_EXCP_ISEG;
                env->error_code = 0;
            /* No matches in page tables or TLB */
            switch (env->mmu_model) {
            case POWERPC_MMU_SOFT_6xx:
                    env->exception_index = POWERPC_EXCP_DSTLB;
                    env->error_code = 1 << 16;
                    env->exception_index = POWERPC_EXCP_DLTLB;
                    env->error_code = 0;
                env->spr[SPR_DMISS] = address;
                env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
                env->error_code |= ctx.key << 19;
                env->spr[SPR_HASH1] = env->htab_base +
                    get_pteg_offset(env, ctx.hash[0], HASH_PTE_SIZE_32);
                env->spr[SPR_HASH2] = env->htab_base +
                    get_pteg_offset(env, ctx.hash[1], HASH_PTE_SIZE_32);
            case POWERPC_MMU_SOFT_74xx:
                    env->exception_index = POWERPC_EXCP_DSTLB;
                    env->exception_index = POWERPC_EXCP_DLTLB;
                /* Implement LRU algorithm */
                env->error_code = ctx.key << 19;
                env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) |
                    ((env->last_way + 1) & (env->nb_ways - 1));
                env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem;
            case POWERPC_MMU_SOFT_4xx:
            case POWERPC_MMU_SOFT_4xx_Z:
                env->exception_index = POWERPC_EXCP_DTLB;
                env->error_code = 0;
                env->spr[SPR_40x_DEAR] = address;
                    env->spr[SPR_40x_ESR] = 0x00800000;
                    env->spr[SPR_40x_ESR] = 0x00000000;
            case POWERPC_MMU_32B:
            case POWERPC_MMU_601:
#if defined(TARGET_PPC64)
            case POWERPC_MMU_620:
            case POWERPC_MMU_64B:
            case POWERPC_MMU_2_06:
            case POWERPC_MMU_2_06d:
                env->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                env->spr[SPR_DAR] = address;
                    env->spr[SPR_DSISR] = 0x42000000;
                    env->spr[SPR_DSISR] = 0x40000000;
            case POWERPC_MMU_MPC8xx:
                cpu_abort(env, "MPC8xx MMU model is not implemented\n");
            case POWERPC_MMU_BOOKE206:
                booke206_update_mas_tlb_miss(env, address, rw);
            case POWERPC_MMU_BOOKE:
                env->exception_index = POWERPC_EXCP_DTLB;
                env->error_code = 0;
                env->spr[SPR_BOOKE_DEAR] = address;
                env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0;
            case POWERPC_MMU_REAL:
                cpu_abort(env, "PowerPC in real mode should never raise "
                          "any MMU exceptions\n");
                cpu_abort(env, "Unknown or invalid MMU model\n");
            /* Access rights violation */
            env->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            if (env->mmu_model == POWERPC_MMU_SOFT_4xx
                || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) {
                env->spr[SPR_40x_DEAR] = address;
                    env->spr[SPR_40x_ESR] |= 0x00800000;
            } else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                       (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                env->spr[SPR_BOOKE_DEAR] = address;
                env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0;
                env->spr[SPR_DAR] = address;
                    env->spr[SPR_DSISR] = 0x0A000000;
                    env->spr[SPR_DSISR] = 0x08000000;
            /* Direct store exception */
            switch (access_type) {
                /* Floating point load/store */
                env->exception_index = POWERPC_EXCP_ALIGN;
                env->error_code = POWERPC_EXCP_ALIGN_FP;
                env->spr[SPR_DAR] = address;
                /* lwarx, ldarx or stwcx. */
                env->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                env->spr[SPR_DAR] = address;
                    env->spr[SPR_DSISR] = 0x06000000;
                    env->spr[SPR_DSISR] = 0x04000000;
                /* eciwx or ecowx */
                env->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                env->spr[SPR_DAR] = address;
                    env->spr[SPR_DSISR] = 0x06100000;
                    env->spr[SPR_DSISR] = 0x04100000;
                printf("DSI: invalid exception (%d)\n", ret);
                env->exception_index = POWERPC_EXCP_PROGRAM;
                    POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
                env->spr[SPR_DAR] = address;
#if defined(TARGET_PPC64)
            /* No match in segment table */
            if (env->mmu_model == POWERPC_MMU_620) {
                env->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                env->spr[SPR_DAR] = address;
                /* XXX: this might be incorrect */
                    env->spr[SPR_DSISR] = 0x42000000;
                    env->spr[SPR_DSISR] = 0x40000000;
                env->exception_index = POWERPC_EXCP_DSEG;
                env->error_code = 0;
                env->spr[SPR_DAR] = address;
    printf("%s: set exception to %d %02x\n", __func__,
           env->exception_index, env->error_code);
/*****************************************************************************/
/* BATs management */
#if !defined(FLUSH_ALL_TLBS)
static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
    target_ulong base, end, page;

    base = BATu & ~0x0001FFFF;
    end = base + mask + 0x00020000;
    LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
             TARGET_FMT_lx ")\n", base, end, mask);
    for (page = base; page != end; page += TARGET_PAGE_SIZE) {
        tlb_flush_page(env, page);
    LOG_BATS("Flush done\n");
static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
    LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID,
             nr, ul == 0 ? 'u' : 'l', value, env->nip);
void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
    dump_store_bat(env, 'I', 0, nr, value);
    if (env->IBAT[0][nr] != value) {
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
        /* When storing valid upper BAT, mask BEPI and BRPN
         * and invalidate all TLBs covered by this BAT */
        mask = (value << 15) & 0x0FFE0000UL;
        env->IBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
            (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
*env
, uint32_t nr
, target_ulong value
)
2183 dump_store_bat(env
, 'I', 1, nr
, value
);
2184 env
->IBAT
[1][nr
] = value
;
void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
    dump_store_bat(env, 'D', 0, nr, value);
    if (env->DBAT[0][nr] != value) {
        /* When storing valid upper BAT, mask BEPI and BRPN
         * and invalidate all TLBs covered by this BAT */
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
        mask = (value << 15) & 0x0FFE0000UL;
        env->DBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
            (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
    dump_store_bat(env, 'D', 1, nr, value);
    env->DBAT[1][nr] = value;
void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value)
#if defined(FLUSH_ALL_TLBS)
    dump_store_bat(env, 'I', 0, nr, value);
    if (env->IBAT[0][nr] != value) {
#if defined(FLUSH_ALL_TLBS)
        mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
        if (env->IBAT[1][nr] & 0x40) {
            /* Invalidate BAT only if it is valid */
#if !defined(FLUSH_ALL_TLBS)
            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
        /* When storing valid upper BAT, mask BEPI and BRPN
         * and invalidate all TLBs covered by this BAT */
        env->IBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->DBAT[0][nr] = env->IBAT[0][nr];
        if (env->IBAT[1][nr] & 0x40) {
#if !defined(FLUSH_ALL_TLBS)
            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#if defined(FLUSH_ALL_TLBS)
void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value)
#if defined(FLUSH_ALL_TLBS)
    dump_store_bat(env, 'I', 1, nr, value);
    if (env->IBAT[1][nr] != value) {
#if defined(FLUSH_ALL_TLBS)
        if (env->IBAT[1][nr] & 0x40) {
#if !defined(FLUSH_ALL_TLBS)
            mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#if !defined(FLUSH_ALL_TLBS)
            mask = (value << 17) & 0x0FFE0000UL;
            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
        env->IBAT[1][nr] = value;
        env->DBAT[1][nr] = value;
#if defined(FLUSH_ALL_TLBS)
/*****************************************************************************/
/* TLB management */
void ppc_tlb_invalidate_all(CPUPPCState *env)
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
        ppc6xx_tlb_invalidate_all(env);
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_SOFT_4xx_Z:
        ppc4xx_tlb_invalidate_all(env);
    case POWERPC_MMU_REAL:
        cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
    case POWERPC_MMU_MPC8xx:
        cpu_abort(env, "MPC8xx MMU model is not implemented\n");
    case POWERPC_MMU_BOOKE:
    case POWERPC_MMU_BOOKE206:
        booke206_flush_tlb(env, -1, 0);
    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
#if defined(TARGET_PPC64)
    case POWERPC_MMU_620:
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_06d:
#endif /* defined(TARGET_PPC64) */
        cpu_abort(env, "Unknown MMU model\n");
void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
#if !defined(FLUSH_ALL_TLBS)
    addr &= TARGET_PAGE_MASK;
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
        ppc6xx_tlb_invalidate_virt(env, addr, 0);
        if (env->id_tlbs == 1) {
            ppc6xx_tlb_invalidate_virt(env, addr, 1);
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_SOFT_4xx_Z:
        ppc4xx_tlb_invalidate_virt(env, addr, env->spr[SPR_40x_PID]);
    case POWERPC_MMU_REAL:
        cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
    case POWERPC_MMU_MPC8xx:
        cpu_abort(env, "MPC8xx MMU model is not implemented\n");
    case POWERPC_MMU_BOOKE:
        cpu_abort(env, "BookE MMU model is not implemented\n");
    case POWERPC_MMU_BOOKE206:
        cpu_abort(env, "BookE 2.06 MMU model is not implemented\n");
    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
        /* tlbie invalidates TLBs for all segments */
        addr &= ~((target_ulong)-1ULL << 28);
        /* XXX: this case should be optimized,
         *      giving a mask to tlb_flush_page */
        tlb_flush_page(env, addr | (0x0 << 28));
        tlb_flush_page(env, addr | (0x1 << 28));
        tlb_flush_page(env, addr | (0x2 << 28));
        tlb_flush_page(env, addr | (0x3 << 28));
        tlb_flush_page(env, addr | (0x4 << 28));
        tlb_flush_page(env, addr | (0x5 << 28));
        tlb_flush_page(env, addr | (0x6 << 28));
        tlb_flush_page(env, addr | (0x7 << 28));
        tlb_flush_page(env, addr | (0x8 << 28));
        tlb_flush_page(env, addr | (0x9 << 28));
        tlb_flush_page(env, addr | (0xA << 28));
        tlb_flush_page(env, addr | (0xB << 28));
        tlb_flush_page(env, addr | (0xC << 28));
        tlb_flush_page(env, addr | (0xD << 28));
        tlb_flush_page(env, addr | (0xE << 28));
        tlb_flush_page(env, addr | (0xF << 28));
#if defined(TARGET_PPC64)
    case POWERPC_MMU_620:
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_06d:
        /* tlbie invalidates TLBs for all segments */
        /* XXX: given that there are too many segments to invalidate,
         *      and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
         *      we just invalidate all TLBs */
#endif /* defined(TARGET_PPC64) */
        cpu_abort(env, "Unknown MMU model\n");
    ppc_tlb_invalidate_all(env);
/*****************************************************************************/
/* Special registers manipulation */
#if defined(TARGET_PPC64)
void ppc_store_asr(CPUPPCState *env, target_ulong value)
    if (env->asr != value) {
void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
    LOG_MMU("%s: " TARGET_FMT_lx "\n", __func__, value);
    if (env->spr[SPR_SDR1] != value) {
        env->spr[SPR_SDR1] = value;
#if defined(TARGET_PPC64)
        if (env->mmu_model & POWERPC_MMU_64) {
            target_ulong htabsize = value & SDR_64_HTABSIZE;

            if (htabsize > 28) {
                fprintf(stderr, "Invalid HTABSIZE 0x" TARGET_FMT_lx
                        " stored in SDR1\n", htabsize);
            env->htab_mask = (1ULL << (htabsize + 18)) - 1;
            env->htab_base = value & SDR_64_HTABORG;
#endif /* defined(TARGET_PPC64) */
            /* FIXME: Should check for valid HTABMASK values */
            env->htab_mask = ((value & SDR_32_HTABMASK) << 16) | 0xFFFF;
            env->htab_base = value & SDR_32_HTABORG;
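        /* Explanatory note (added, not in the original source): on 64-bit hash
         * MMUs, HTABSIZE encodes a table of 2^(18 + HTABSIZE) bytes, hence the
         * (1 << (htabsize + 18)) - 1 mask (HTABSIZE 0 is a 256 KB table); on
         * 32-bit MMUs, HTABMASK is shifted left by 16 and ORed with 0xFFFF to
         * form the PTEG offset mask.
         */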
/* Segment registers load and store */
target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
    return env->sr[sr_num];
void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
    LOG_MMU("%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
            (int)srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        uint64_t rb = 0, rs = 0;

        rb |= ((uint32_t)srnum & 0xf) << 28;
        /* Set the valid bit */
        rb |= (uint32_t)srnum;
        rs |= (value & 0xfffffff) << 12;
        rs |= ((value >> 27) & 0xf) << 8;
        ppc_store_slb(env, rb, rs);
    if (env->sr[srnum] != value) {
        env->sr[srnum] = value;
        /* Invalidating 256 MB of virtual memory in 4 kB pages takes far longer
           than flushing the whole TLB. */
#if !defined(FLUSH_ALL_TLBS) && 0
            target_ulong page, end;
            /* Invalidate 256 MB of virtual memory */
            page = (16 << 20) * srnum;
            end = page + (16 << 20);
            for (; page != end; page += TARGET_PAGE_SIZE) {
                tlb_flush_page(env, page);
2513 #endif /* !defined(CONFIG_USER_ONLY) */
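/* A worked example of the SR-to-SLB packing in helper_store_sr() above, read
 * directly off the code rather than the ISA: storing value 0x00000123 into
 * SR 5 builds rb = (5 << 28) | SLB_ESID_V | 5 (segment number as ESID, the
 * same number as SLB index) and rs = (0x123 << 12) | (((0x123 >> 27) & 0xf)
 * << 8) = 0x00123000, i.e. the SR's low 28 bits land in the VSID field at
 * bit 12 and its flag bits in the nibble at bits 11:8.
 */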
#if !defined(CONFIG_USER_ONLY)
/* SLB management */
#if defined(TARGET_PPC64)
void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    if (ppc_store_slb(env, rb, rs) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}
target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    target_ulong rt = 0;

    if (ppc_load_slb_esid(env, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}
target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}
#endif /* defined(TARGET_PPC64) */
/* TLB management */
void helper_tlbia(CPUPPCState *env)
{
    ppc_tlb_invalidate_all(env);
}

void helper_tlbie(CPUPPCState *env, target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 0);
}

void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 1);
}
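/* As the helpers above read the SPR state: on a 602/603-style software TLB
 * miss, the ICMP/IMISS or DCMP/DMISS pair describes the faulting access, the
 * replacement way comes from SRR1 bit 17 (so SRR1 = 0x00020000 selects way 1
 * of the two-way set), and tlbli/tlbld copy RPA/CMP into that way.  The 74xx
 * variant below takes the way from the low two bits of TLBMISS instead.
 */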
/* PowerPC 74xx software TLB load instructions helpers */
static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_74xx_tlb(env, EPN, 0);
}

void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_74xx_tlb(env, EPN, 1);
}
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
        ret = ctx.raddr;
    }
    env->nb_BATs = nb_BATs;
    return ret;
}
static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}

static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined(TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}
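/* A few sample points of the size encoding above: size 0 is 1 KiB, 1 is
 * 4 KiB, 3 is 64 KiB, 7 is 16 MiB and 0xF is 1 TiB (64-bit targets only);
 * booke_page_size_to_tlb() is the exact inverse and flags any page size that
 * is not 1 KiB times a power of four by returning a negative value, which
 * the 4xx/440 tlbre helpers below check for.
 */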
/* Helpers for 4xx TLB management */
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00
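/* Field layout as the helpers below use these masks: in TLBHI the EPN
 * occupies the upper bits, SIZE sits in bits 9:7, V (valid) is bit 6 and E
 * (endianness) is bit 5; in TLBLO the RPN occupies bits 31:10, EX is bit 9,
 * WR is bit 8 and the storage attributes fill bits 7:0.
 */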
target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}
target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}
void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one.
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "is not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported for now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
}
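/* A worked example of the EPN masking above: a TLBHI SIZE field of 3 gives
 * booke_tlb_to_page_size(3) = 1024 << 6 = 64 KiB, so tlb->EPN = val &
 * ~(0x10000 - 1) keeps bits 31:16 of the written word as the effective page
 * number and discards the rest.
 */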
void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
/* PowerPC 440 TLB management */
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
                      target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
            do_flush_tlbs = 1;
        }
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
            do_flush_tlbs = 1;
        }
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs) {
            tlb_flush(env, 1);
        }
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
            tlb_flush(env, 1);
        }
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1) {
            tlb->prot |= PAGE_READ << 4;
        }
        if (value & 0x2) {
            tlb->prot |= PAGE_WRITE << 4;
        }
        if (value & 0x4) {
            tlb->prot |= PAGE_EXEC << 4;
        }
        if (value & 0x8) {
            tlb->prot |= PAGE_READ;
        }
        if (value & 0x10) {
            tlb->prot |= PAGE_WRITE;
        }
        if (value & 0x20) {
            tlb->prot |= PAGE_EXEC;
        }
        break;
    }
}
target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
                              target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF) {
            size = 1;
        }
        ret |= size << 4;
        if (tlb->attr & 0x1) {
            ret |= 0x100;
        }
        if (tlb->prot & PAGE_VALID) {
            ret |= 0x200;
        }
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4)) {
            ret |= 0x1;
        }
        if (tlb->prot & (PAGE_WRITE << 4)) {
            ret |= 0x2;
        }
        if (tlb->prot & (PAGE_EXEC << 4)) {
            ret |= 0x4;
        }
        if (tlb->prot & PAGE_READ) {
            ret |= 0x8;
        }
        if (tlb->prot & PAGE_WRITE) {
            ret |= 0x10;
        }
        if (tlb->prot & PAGE_EXEC) {
            ret |= 0x20;
        }
        break;
    }
    return ret;
}
target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
/* PowerPC BookE 2.06 TLB management */

static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
{
    uint32_t tlbncfg = 0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env, "we don't support HES yet\n");
    }

    return booke206_get_tlbm(env, tlb, ea, esel);
}
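/* Symbolically, a MAS0 value of (1 << MAS0_TLBSEL_SHIFT) | (3 << MAS0_ESEL_SHIFT)
 * makes the helper above pick entry 3 of TLB1, with the effective page number
 * taken from MAS2; the concrete bit positions come from the MAS0_* macros in
 * cpu.h rather than being hard-coded here.
 */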
void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    /* changing PIDs means we're in a different address space now */
    tlb_flush(env, 1);
}
void helper_booke206_tlbwe(CPUPPCState *env)
{
    uint32_t tlbncfg, tlbn;
    ppcmas_tlb_t *tlb;
    uint32_t size_tlb, size_ps;
    target_ulong mask;

    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        if (0) {
            return;
        }
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
        !msr_gs) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_INVAL);
    }

    /* check that we support the targeted size */
    size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
    size_ps = booke206_tlbnps(env, tlbn);
    if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
        !(size_ps & (1 << size_tlb))) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_INVAL);
    }

    if (msr_gs) {
        cpu_abort(env, "missing HV implementation\n");
    }
    tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
                  env->spr[SPR_BOOKE_MAS3];
    tlb->mas1 = env->spr[SPR_BOOKE_MAS1];

    if (!(tlbncfg & TLBnCFG_AVAIL)) {
        /* force !AVAIL TLB entries to correct page size */
        tlb->mas1 &= ~MAS1_TSIZE_MASK;
        /* XXX can be configured in MMUCSR0 */
        tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
    }

    /* Make a mask from TLB size to discard invalid bits in EPN field */
    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    /* Add a mask for page attributes */
    mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;

    /* Executing a tlbwe instruction in 32-bit mode will set
     * bits 0:31 of the TLB EPN field to zero.
     */
    if (!msr_cm) {
        mask &= 0xffffffffULL;
    }

    tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;

    if (!(tlbncfg & TLBnCFG_IPROT)) {
        /* no IPROT supported by TLB */
        tlb->mas1 &= ~MAS1_IPROT;
    }

    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
        tlb_flush_page(env, tlb->mas2 & MAS2_EPN_MASK);
    } else {
        tlb_flush(env, 1);
    }
}
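/* A worked example of the mask construction above, assuming a 32-bit guest
 * and a 4 KiB entry: booke206_tlb_to_page_size() returns 0x1000, so the mask
 * starts as 0xFFFFF000 (the aligned EPN bits) and the ACM/VLE/WIMGE attribute
 * bits are then or'ed in, which is exactly what survives from MAS2 when the
 * entry is written.
 */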
static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    int tlbn = booke206_tlbm_to_tlbn(env, tlb);
    int way = booke206_tlbm_to_way(env, tlb);

    env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;

    env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
    env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
    env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
    env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
}
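/* The combined 64-bit mas7_3 field keeps MAS7 in its upper half and MAS3 in
 * its lower half, so for example mas7_3 = 0xFFFFFF03FULL reads back above as
 * MAS7 = 0xF (the upper physical-address bits) and, in the low word, MAS3 =
 * 0xFFFFF03F (the lower RPN bits plus the permission bits).
 */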
void helper_booke206_tlbre(CPUPPCState *env)
{
    ppcmas_tlb_t *tlb = NULL;

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        env->spr[SPR_BOOKE_MAS1] = 0;
    } else {
        booke206_tlb_to_mas(env, tlb);
    }
}
void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    target_phys_addr_t raddr;
    uint32_t spid, sas;

    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }
            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }
            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              uint32_t ea)
{
    int i;
    int ways = booke206_tlb_ways(env, tlbn);
    target_ulong mask;

    for (i = 0; i < ways; i++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
        if (!tlb) {
            continue;
        }
        mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
            !(tlb->mas1 & MAS1_IPROT)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}
void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
    if (address & 0x4) {
        /* flush all entries */
        if (address & 0x8) {
            /* flush all of TLB1 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            /* flush all of TLB0 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    if (address & 0x8) {
        /* flush TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        tlb_flush(env, 1);
    } else {
        /* flush TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        tlb_flush_page(env, address & MAS2_EPN_MASK);
    }
}
void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
{
    /* XXX missing LPID handling */
    booke206_flush_tlb(env, -1, 1);
}
void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
    int i, j;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    ppcmas_tlb_t *tlb = env->tlb.tlbm;
    int tlb_size;

    /* XXX missing LPID handling */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env, 1);
}
void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode then */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env, 1);
}
void helper_booke206_tlbflush(CPUPPCState *env, uint32_t type)
{
    int flags = 0;

    if (type & 2) {
        flags |= BOOKE206_FLUSH_TLB1;
    }

    if (type & 4) {
        flags |= BOOKE206_FLUSH_TLB0;
    }

    booke206_flush_tlb(env, flags, 1);
}