/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* Header dependencies assumed from the surrounding QEMU PowerPC target:
 * CPU state, helper prototypes and the KVM glue used by the dump code. */
#include "cpu.h"
#include "helper.h"
#include "kvm.h"
#include "kvm_ppc.h"

//#define DEBUG_MMU
//#define DEBUG_BATS
//#define DEBUG_SLB
//#define DEBUG_SOFTWARE_TLB
//#define DUMP_PAGE_TABLES
//#define FLUSH_ALL_TLBS

#ifdef DEBUG_MMU
#  define LOG_MMU(...) qemu_log(__VA_ARGS__)
#  define LOG_MMU_STATE(env) log_cpu_state((env), 0)
#else
#  define LOG_MMU(...) do { } while (0)
#  define LOG_MMU_STATE(...) do { } while (0)
#endif

#ifdef DEBUG_SOFTWARE_TLB
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SWTLB(...) do { } while (0)
#endif

#ifdef DEBUG_BATS
#  define LOG_BATS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_BATS(...) do { } while (0)
#endif

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif
/*****************************************************************************/
/* PowerPC MMU emulation */

#if defined(CONFIG_USER_ONLY)
int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw,
                             int mmu_idx)
{
    int exception, error_code;

    if (rw == 2) {
        /* Instruction fetch fault */
        exception = POWERPC_EXCP_ISI;
        error_code = 0x40000000;
    } else {
        /* Data access fault */
        exception = POWERPC_EXCP_DSI;
        error_code = 0x40000000;
        if (rw) {
            error_code |= 0x02000000;
        }
        env->spr[SPR_DAR] = address;
        env->spr[SPR_DSISR] = error_code;
    }
    env->exception_index = exception;
    env->error_code = error_code;

    return 1;
}

#else
/* Common routines used by software and hardware TLBs emulation */
static inline int pte_is_valid(target_ulong pte0)
{
    return pte0 & 0x80000000 ? 1 : 0;
}

static inline void pte_invalidate(target_ulong *pte0)
{
    *pte0 &= ~0x80000000;
}

#if defined(TARGET_PPC64)
static inline int pte64_is_valid(target_ulong pte0)
{
    return pte0 & 0x0000000000000001ULL ? 1 : 0;
}

static inline void pte64_invalidate(target_ulong *pte0)
{
    *pte0 &= ~0x0000000000000001ULL;
}
#endif

#define PTE_PTEM_MASK 0x7FFFFFBF
#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
#if defined(TARGET_PPC64)
#define PTE64_PTEM_MASK 0xFFFFFFFFFFFFFF80ULL
#define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F)
#endif
static inline int pp_check(int key, int pp, int nx)
{
    int access;

    /* Compute access rights */
    /* When pp is 3/7, the result is undefined. Set it to noaccess */
    access = 0;
    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            access |= PAGE_WRITE;
            /* No break here */
        case 0x3:
        case 0x6:
            access |= PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            access = 0;
            break;
        case 0x1:
        case 0x3:
            access = PAGE_READ;
            break;
        case 0x2:
            access = PAGE_READ | PAGE_WRITE;
            break;
        }
    }
    if (nx == 0) {
        access |= PAGE_EXEC;
    }

    return access;
}
static inline int check_prot(int prot, int rw, int access_type)
{
    int ret;

    if (access_type == ACCESS_CODE) {
        if (prot & PAGE_EXEC) {
            ret = 0;
        } else {
            ret = -2;
        }
    } else if (rw) {
        if (prot & PAGE_WRITE) {
            ret = 0;
        } else {
            ret = -2;
        }
    } else {
        if (prot & PAGE_READ) {
            ret = 0;
        } else {
            ret = -2;
        }
    }

    return ret;
}
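
/*
 * Note on the return-code convention used by the translation helpers in
 * this file, as interpreted by their callers: 0 means the access is
 * granted, -1 that no matching entry was found, -2 that an entry matched
 * but the access rights forbid it.  The segment and direct-store paths
 * add further negative codes (-3, -4, -5) that cpu_ppc_handle_mmu_fault()
 * maps to the architected exceptions.
 */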
static inline int pte_check(mmu_ctx_t *ctx, int is_64b, target_ulong pte0,
                            target_ulong pte1, int h, int rw, int type)
{
    target_ulong ptem, mmask;
    int access, ret, pteh, ptev, pp;

    ret = -1;
    /* Check validity and table match */
#if defined(TARGET_PPC64)
    if (is_64b) {
        ptev = pte64_is_valid(pte0);
        pteh = (pte0 >> 1) & 1;
    } else
#endif
    {
        ptev = pte_is_valid(pte0);
        pteh = (pte0 >> 6) & 1;
    }
    if (ptev && h == pteh) {
        /* Check vsid & api */
#if defined(TARGET_PPC64)
        if (is_64b) {
            ptem = pte0 & PTE64_PTEM_MASK;
            mmask = PTE64_CHECK_MASK;
            pp = (pte1 & 0x00000003) | ((pte1 >> 61) & 0x00000004);
            ctx->nx = (pte1 >> 2) & 1; /* No execute bit */
            ctx->nx |= (pte1 >> 3) & 1; /* Guarded bit */
        } else
#endif
        {
            ptem = pte0 & PTE_PTEM_MASK;
            mmask = PTE_CHECK_MASK;
            pp = pte1 & 0x00000003;
        }
        if (ptem == ctx->ptem) {
            if (ctx->raddr != (hwaddr)-1ULL) {
                /* all matches should have equal RPN, WIMG & PP */
                if ((ctx->raddr & mmask) != (pte1 & mmask)) {
                    qemu_log("Bad RPN/WIMG/PP\n");
                    return -3;
                }
            }
            /* Compute access rights */
            access = pp_check(ctx->key, pp, ctx->nx);
            /* Keep the matching PTE information */
            ctx->raddr = pte1;
            ctx->prot = access;
            ret = check_prot(ctx->prot, rw, type);
            if (ret == 0) {
                /* Access granted */
                LOG_MMU("PTE access granted !\n");
            } else {
                /* Access right violation */
                LOG_MMU("PTE access rejected\n");
            }
        }
    }

    return ret;
}

static inline int pte32_check(mmu_ctx_t *ctx, target_ulong pte0,
                              target_ulong pte1, int h, int rw, int type)
{
    return pte_check(ctx, 0, pte0, pte1, h, rw, type);
}

#if defined(TARGET_PPC64)
static inline int pte64_check(mmu_ctx_t *ctx, target_ulong pte0,
                              target_ulong pte1, int h, int rw, int type)
{
    return pte_check(ctx, 1, pte0, pte1, h, rw, type);
}
#endif
static inline int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
                                   int ret, int rw)
{
    int store = 0;

    /* Update page flags */
    if (!(*pte1p & 0x00000100)) {
        /* Update accessed flag */
        *pte1p |= 0x00000100;
        store = 1;
    }
    if (!(*pte1p & 0x00000080)) {
        if (rw == 1 && ret == 0) {
            /* Update changed flag */
            *pte1p |= 0x00000080;
            store = 1;
        } else {
            /* Force page fault for first write access */
            ctx->prot &= ~PAGE_WRITE;
        }
    }

    return store;
}
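
/*
 * The two bits tested above are the architected Referenced (0x100) and
 * Changed (0x080) flags in the low PTE word.  The function returns 1 when
 * the in-memory PTE was modified and therefore needs to be written back,
 * which is what find_pte2() does with the result.
 */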
/* Software driven TLB helpers */
static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
                                    int way, int is_code)
{
    int nr;

    /* Select TLB num in a way from address */
    nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
    /* Select TLB way */
    nr += env->tlb_per_way * way;
    /* 6xx have separate TLBs for instructions and data */
    if (is_code && env->id_tlbs == 1) {
        nr += env->nb_tlb;
    }

    return nr;
}
static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
{
    ppc6xx_tlb_t *tlb;
    int nr, max;

    /* LOG_SWTLB("Invalidate all TLBs\n"); */
    /* Invalidate all defined software TLB */
    max = env->nb_tlb;
    if (env->id_tlbs == 1) {
        max *= 2;
    }
    for (nr = 0; nr < max; nr++) {
        tlb = &env->tlb.tlb6[nr];
        pte_invalidate(&tlb->pte0);
    }
    tlb_flush(env, 1);
}
static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
                                               target_ulong eaddr,
                                               int is_code, int match_epn)
{
#if !defined(FLUSH_ALL_TLBS)
    ppc6xx_tlb_t *tlb;
    int way, nr;

    /* Invalidate ITLB + DTLB, all ways */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
        tlb = &env->tlb.tlb6[nr];
        if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
            LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
                      env->nb_tlb, eaddr);
            pte_invalidate(&tlb->pte0);
            tlb_flush_page(env, tlb->EPN);
        }
    }
#else
    /* XXX: The PowerPC specification says this is valid as well */
    ppc6xx_tlb_invalidate_all(env);
#endif
}

static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
                                              target_ulong eaddr, int is_code)
{
    ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
}
static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
                             int is_code, target_ulong pte0, target_ulong pte1)
{
    ppc6xx_tlb_t *tlb;
    int nr;

    nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
    tlb = &env->tlb.tlb6[nr];
    LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1);
    /* Invalidate any pending reference in QEMU for this virtual address */
    ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
    tlb->pte0 = pte0;
    tlb->pte1 = pte1;
    tlb->EPN = EPN;
    /* Store last way for LRU mechanism */
    env->last_way = way;
}
static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
                                   target_ulong eaddr, int rw, int access_type)
{
    ppc6xx_tlb_t *tlb;
    int nr, best, way;
    int ret;

    best = -1;
    ret = -1; /* No TLB found */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way,
                               access_type == ACCESS_CODE ? 1 : 0);
        tlb = &env->tlb.tlb6[nr];
        /* This test "emulates" the PTE index match for hardware TLBs */
        if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
            LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
                      "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
                      pte_is_valid(tlb->pte0) ? "valid" : "inval",
                      tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
            continue;
        }
        LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
                  TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
                  pte_is_valid(tlb->pte0) ? "valid" : "inval",
                  tlb->EPN, eaddr, tlb->pte1,
                  rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
        switch (pte32_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) {
        case -3:
            /* TLB inconsistency */
            return -1;
        case -2:
            /* Access violation */
            ret = -2;
            best = nr;
            break;
        case -1:
        default:
            /* No match */
            break;
        case 0:
            /* access granted */
            /* XXX: we should go on looping to check all TLBs'
             *      consistency, but we can speed up the whole thing as
             *      the result would be undefined if TLBs are not
             *      consistent.
             */
            ret = 0;
            best = nr;
            goto done;
        }
    }
    if (best != -1) {
    done:
        LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
                  ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
        /* Update page flags */
        pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, rw);
    }

    return ret;
}
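
/*
 * The 6xx/74xx "software TLB" handled above models processors that raise
 * a TLB miss exception instead of walking the hash table in hardware; the
 * do_6xx_tlb()/do_74xx_tlb() helpers further down are what the guest miss
 * handlers use to refill these entries.
 */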
/* Perform BAT hit & translation */
static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
                                 int *validp, int *protp, target_ulong *BATu,
                                 target_ulong *BATl)
{
    target_ulong bl;
    int pp, valid, prot;

    bl = (*BATu & 0x00001FFC) << 15;
    valid = 0;
    prot = 0;
    if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
        ((msr_pr != 0) && (*BATu & 0x00000001))) {
        valid = 1;
        pp = *BATl & 0x00000003;
        if (pp != 0) {
            prot = PAGE_READ | PAGE_EXEC;
            if (pp == 0x2) {
                prot |= PAGE_WRITE;
            }
        }
    }
    *blp = bl;
    *validp = valid;
    *protp = prot;
}
static inline void bat_601_size_prot(CPUPPCState *env, target_ulong *blp,
                                     int *validp, int *protp,
                                     target_ulong *BATu, target_ulong *BATl)
{
    target_ulong bl;
    int key, pp, valid, prot;

    bl = (*BATl & 0x0000003F) << 17;
    LOG_BATS("b %02x ==> bl " TARGET_FMT_lx " msk " TARGET_FMT_lx "\n",
             (uint8_t)(*BATl & 0x0000003F), bl, ~bl);
    prot = 0;
    valid = (*BATl >> 6) & 1;
    if (valid) {
        pp = *BATu & 0x00000003;
        if (msr_pr == 0) {
            key = (*BATu >> 3) & 1;
        } else {
            key = (*BATu >> 2) & 1;
        }
        prot = pp_check(key, pp, 0);
    }
    *blp = bl;
    *validp = valid;
    *protp = prot;
}
static inline int get_bat(CPUPPCState *env, mmu_ctx_t *ctx,
                          target_ulong virtual, int rw, int type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i, valid, prot;
    int ret = -1;

    LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
             type == ACCESS_CODE ? 'I' : 'D', virtual);
    switch (type) {
    case ACCESS_CODE:
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
        break;
    default:
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
        break;
    }
    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
            bat_601_size_prot(env, &bl, &valid, &prot, BATu, BATl);
        } else {
            bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
        }
        LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                 " BATl " TARGET_FMT_lx "\n", __func__,
                 type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl);
        if ((virtual & 0xF0000000) == BEPIu &&
            ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
            /* BAT matches */
            if (valid != 0) {
                /* Get physical address */
                ctx->raddr = (*BATl & 0xF0000000) |
                    ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
                    (virtual & 0x0001F000);
                /* Compute access rights */
                ctx->prot = prot;
                ret = check_prot(ctx->prot, rw, type);
                if (ret == 0) {
                    LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
                             i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
                             ctx->prot & PAGE_WRITE ? 'W' : '-');
                }
                break;
            }
        }
    }
    if (ret < 0) {
#if defined(DEBUG_BATS)
        if (qemu_log_enabled()) {
            LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual);
            for (i = 0; i < 4; i++) {
                BATu = &BATut[i];
                BATl = &BATlt[i];
                BEPIu = *BATu & 0xF0000000;
                BEPIl = *BATu & 0x0FFE0000;
                bl = (*BATu & 0x00001FFC) << 15;
                LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                         " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                         TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                         __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual,
                         *BATu, *BATl, BEPIu, BEPIl, bl);
            }
        }
#endif
    }
    /* No hit */
    return ret;
}
static inline hwaddr get_pteg_offset(CPUPPCState *env,
                                     hwaddr hash, int pte_size)
{
    return (hash * pte_size * 8) & env->htab_mask;
}
/* PTE table lookup */
static inline int find_pte2(CPUPPCState *env, mmu_ctx_t *ctx, int is_64b, int h,
                            int rw, int type, int target_page_bits)
{
    hwaddr pteg_off;
    target_ulong pte0, pte1;
    int i, good = -1;
    int ret, r;

    ret = -1; /* No entry found */
    pteg_off = get_pteg_offset(env, ctx->hash[h],
                               is_64b ? HASH_PTE_SIZE_64 : HASH_PTE_SIZE_32);
    for (i = 0; i < 8; i++) {
#if defined(TARGET_PPC64)
        if (is_64b) {
            if (env->external_htab) {
                pte0 = ldq_p(env->external_htab + pteg_off + (i * 16));
                pte1 = ldq_p(env->external_htab + pteg_off + (i * 16) + 8);
            } else {
                pte0 = ldq_phys(env->htab_base + pteg_off + (i * 16));
                pte1 = ldq_phys(env->htab_base + pteg_off + (i * 16) + 8);
            }

            r = pte64_check(ctx, pte0, pte1, h, rw, type);
            LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " "
                    TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
                    pteg_off + (i * 16), pte0, pte1, (int)(pte0 & 1), h,
                    (int)((pte0 >> 1) & 1), ctx->ptem);
        } else
#endif
        {
            if (env->external_htab) {
                pte0 = ldl_p(env->external_htab + pteg_off + (i * 8));
                pte1 = ldl_p(env->external_htab + pteg_off + (i * 8) + 4);
            } else {
                pte0 = ldl_phys(env->htab_base + pteg_off + (i * 8));
                pte1 = ldl_phys(env->htab_base + pteg_off + (i * 8) + 4);
            }
            r = pte32_check(ctx, pte0, pte1, h, rw, type);
            LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " "
                    TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
                    pteg_off + (i * 8), pte0, pte1, (int)(pte0 >> 31), h,
                    (int)((pte0 >> 6) & 1), ctx->ptem);
        }
        switch (r) {
        case -3:
            /* PTE inconsistency */
            return -1;
        case -2:
            /* Access violation */
            ret = -2;
            good = i;
            break;
        case -1:
        default:
            /* No PTE match */
            break;
        case 0:
            /* access granted */
            /* XXX: we should go on looping to check all PTEs'
             *      consistency, but we can speed up the whole thing as
             *      the result would be undefined if PTEs are not
             *      consistent.
             */
            ret = 0;
            good = i;
            goto done;
        }
    }
    if (good != -1) {
    done:
        LOG_MMU("found PTE at addr " TARGET_FMT_lx " prot=%01x ret=%d\n",
                ctx->raddr, ctx->prot, ret);
        /* Update page flags */
        pte1 = ctx->raddr;
        if (pte_update_flags(ctx, &pte1, ret, rw) == 1) {
#if defined(TARGET_PPC64)
            if (is_64b) {
                if (env->external_htab) {
                    stq_p(env->external_htab + pteg_off + (good * 16) + 8,
                          pte1);
                } else {
                    stq_phys_notdirty(env->htab_base + pteg_off +
                                      (good * 16) + 8, pte1);
                }
            } else
#endif
            {
                if (env->external_htab) {
                    stl_p(env->external_htab + pteg_off + (good * 8) + 4,
                          pte1);
                } else {
                    stl_phys_notdirty(env->htab_base + pteg_off +
                                      (good * 8) + 4, pte1);
                }
            }
        }
    }

    /* We have a TLB that saves 4K pages, so let's
     * split a huge page to 4k chunks */
    if (target_page_bits != TARGET_PAGE_BITS) {
        ctx->raddr |= (ctx->eaddr & ((1 << target_page_bits) - 1))
                      & TARGET_PAGE_MASK;
    }
    return ret;
}

static inline int find_pte(CPUPPCState *env, mmu_ctx_t *ctx, int h, int rw,
                           int type, int target_page_bits)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        return find_pte2(env, ctx, 1, h, rw, type, target_page_bits);
    }
#endif

    return find_pte2(env, ctx, 0, h, rw, type, target_page_bits);
}
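
/*
 * Each PTEG (PTE group) holds 8 entries, which is why find_pte2() scans
 * i = 0..7 at the offset returned by get_pteg_offset().  The second
 * (h = 1) lookup issued by get_segment() uses the complemented hash, so a
 * page may live in either its primary or its secondary group.
 */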
#if defined(TARGET_PPC64)
static inline ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
{
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}
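
/*
 * An SLB entry matches either on a 256MB ESID or, when its B field selects
 * 1TB segments, on a 1TB ESID; both candidate ESIDs are computed up front
 * so that a single pass over the array is enough.
 */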
/*****************************************************************************/

void helper_slbia(CPUPPCState *env)
{
    int n, do_invalidate;

    do_invalidate = 0;
    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /* XXX: given the fact that segment size is 256 MB or 1TB,
             *      and we still don't have a tlb_flush_mask(env, n, mask)
             *      in QEMU, we just invalidate all TLBs
             */
            do_invalidate = 1;
        }
    }
    if (do_invalidate) {
        tlb_flush(env, 1);
    }
}
void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    ppc_slb_t *slb;

    slb = slb_lookup(env, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /* XXX: given the fact that segment size is 256 MB or 1TB,
         *      and we still don't have a tlb_flush_mask(env, n, mask)
         *      in QEMU, we just invalidate all TLBs
         */
        tlb_flush(env, 1);
    }
}
int ppc_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (rb & (0x1000 - env->slb_nr)) {
        return -1; /* Reserved bits set or slot too high */
    }
    if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    /* Mask out the slot number as we store the entry */
    slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
    slb->vsid = rs;

    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, slot, rb, rs,
            slb->esid, slb->vsid);

    return 0;
}
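
/*
 * slbmte operand layout as decoded above: RB carries the ESID, the valid
 * bit and the target slot index in its low 12 bits, while RS carries the
 * VSID together with the segment-size (B) and protection/no-execute
 * attributes.
 */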
static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb,
                             target_ulong *rt)
{
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb,
                             target_ulong *rt)
{
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}
#endif /* defined(TARGET_PPC64) */
/* Perform segment based translation */
static inline int get_segment(CPUPPCState *env, mmu_ctx_t *ctx,
                              target_ulong eaddr, int rw, int type)
{
    hwaddr hash;
    target_ulong vsid;
    int ds, pr, target_page_bits;
    int ret, ret2;

    pr = msr_pr;
    ctx->eaddr = eaddr;
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        ppc_slb_t *slb;
        target_ulong pageaddr;
        int segment_bits;

        LOG_MMU("Check SLBs\n");
        slb = slb_lookup(env, eaddr);
        if (!slb) {
            return -5;
        }

        if (slb->vsid & SLB_VSID_B) {
            vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
            segment_bits = 40;
        } else {
            vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
            segment_bits = 28;
        }

        target_page_bits = (slb->vsid & SLB_VSID_L)
            ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
        ctx->key = !!(pr ? (slb->vsid & SLB_VSID_KP)
                      : (slb->vsid & SLB_VSID_KS));
        ds = 0;
        ctx->nx = !!(slb->vsid & SLB_VSID_N);

        pageaddr = eaddr & ((1ULL << segment_bits)
                            - (1ULL << target_page_bits));
        if (slb->vsid & SLB_VSID_B) {
            hash = vsid ^ (vsid << 25) ^ (pageaddr >> target_page_bits);
        } else {
            hash = vsid ^ (pageaddr >> target_page_bits);
        }
        /* Only 5 bits of the page index are used in the AVPN */
        ctx->ptem = (slb->vsid & SLB_VSID_PTEM) |
            ((pageaddr >> 16) & ((1ULL << segment_bits) - 0x80));
    } else
#endif /* defined(TARGET_PPC64) */
    {
        target_ulong sr, pgidx;

        sr = env->sr[eaddr >> 28];
        ctx->key = (((sr & 0x20000000) && (pr != 0)) ||
                    ((sr & 0x40000000) && (pr == 0))) ? 1 : 0;
        ds = sr & 0x80000000 ? 1 : 0;
        ctx->nx = sr & 0x10000000 ? 1 : 0;
        vsid = sr & 0x00FFFFFF;
        target_page_bits = TARGET_PAGE_BITS;
        LOG_MMU("Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx " nip="
                TARGET_FMT_lx " lr=" TARGET_FMT_lx
                " ir=%d dr=%d pr=%d %d t=%d\n",
                eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
                (int)msr_dr, pr != 0 ? 1 : 0, rw, type);
        pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
        hash = vsid ^ pgidx;
        ctx->ptem = (vsid << 7) | (pgidx >> 10);
    }
    LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
            ctx->key, ds, ctx->nx, vsid);
    ret = -1;
    if (!ds) {
        /* Check if instruction fetch is allowed, if needed */
        if (type != ACCESS_CODE || ctx->nx == 0) {
            /* Page address translation */
            LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
                    " hash " TARGET_FMT_plx "\n",
                    env->htab_base, env->htab_mask, hash);
            ctx->hash[0] = hash;
            ctx->hash[1] = ~hash;

            /* Initialize real address with an invalid value */
            ctx->raddr = (hwaddr)-1ULL;
            if (unlikely(env->mmu_model == POWERPC_MMU_SOFT_6xx ||
                         env->mmu_model == POWERPC_MMU_SOFT_74xx)) {
                /* Software TLB search */
                ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type);
            } else {
                LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                        " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                        " hash=" TARGET_FMT_plx "\n",
                        env->htab_base, env->htab_mask, vsid, ctx->ptem,
                        ctx->hash[0]);
                /* Primary table lookup */
                ret = find_pte(env, ctx, 0, rw, type, target_page_bits);
                if (ret < 0) {
                    /* Secondary table lookup */
                    if (eaddr != 0xEFFFFFFF) {
                        LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                                " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                                " hash=" TARGET_FMT_plx "\n", env->htab_base,
                                env->htab_mask, vsid, ctx->ptem, ctx->hash[1]);
                    }
                    ret2 = find_pte(env, ctx, 1, rw, type,
                                    target_page_bits);
                    if (ret2 != -1) {
                        ret = ret2;
                    }
                }
            }
#if defined(DUMP_PAGE_TABLES)
            if (qemu_log_enabled()) {
                hwaddr curaddr;
                uint32_t a0, a1, a2, a3;

                qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
                         "\n", sdr, mask + 0x80);
                for (curaddr = sdr; curaddr < (sdr + mask + 0x80);
                     curaddr += 16) {
                    a0 = ldl_phys(curaddr);
                    a1 = ldl_phys(curaddr + 4);
                    a2 = ldl_phys(curaddr + 8);
                    a3 = ldl_phys(curaddr + 12);
                    if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
                        qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
                                 curaddr, a0, a1, a2, a3);
                    }
                }
            }
#endif
        } else {
            LOG_MMU("No access allowed\n");
            ret = -3;
        }
    } else {
        target_ulong sr;

        LOG_MMU("direct store...\n");
        /* Direct-store segment : absolutely *BUGGY* for now */

        /* Direct-store implies a 32-bit MMU.
         * Check the Segment Register's bus unit ID (BUID).
         */
        sr = env->sr[eaddr >> 28];
        if ((sr & 0x1FF00000) >> 20 == 0x07f) {
            /* Memory-forced I/O controller interface access */
            /* If T=1 and BUID=x'07F', the 601 performs a memory access
             * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
             */
            ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
            ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return 0;
        }

        switch (type) {
        case ACCESS_INT:
            /* Integer load/store : only access allowed */
            break;
        case ACCESS_CODE:
            /* No code fetch is allowed in direct-store areas */
            return -4;
        case ACCESS_FLOAT:
            /* Floating point load/store */
            return -4;
        case ACCESS_RES:
            /* lwarx, ldarx or srwcx. */
            return -4;
        case ACCESS_CACHE:
            /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
            /* Should make the instruction do a no-op.
             * As it already does a no-op, it's quite easy :-)
             */
            ctx->raddr = eaddr;
            return 0;
        case ACCESS_EXT:
            /* eciwx or ecowx */
            return -4;
        default:
            qemu_log("ERROR: instruction should not need "
                     "address translation\n");
            return -4;
        }
        if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) {
            ctx->raddr = eaddr;
            ret = 2;
        } else {
            ret = -2;
        }
    }

    return ret;
}
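
/*
 * Hash summary for the 32-bit segmented MMU handled above: the primary
 * hash is vsid XOR page-index and the secondary hash is its one's
 * complement (ctx->hash[1] = ~hash); find_pte() is called once for each.
 * The 64-bit SLB path uses the same scheme but folds the larger VSID and,
 * for 1TB segments, an extra (vsid << 25) term into the hash.
 */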
/* Generic TLB check function for embedded PowerPC implementations */
static int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
                            hwaddr *raddrp,
                            target_ulong address, uint32_t pid, int ext,
                            int i)
{
    target_ulong mask;

    /* Check valid flag */
    if (!(tlb->prot & PAGE_VALID)) {
        return -1;
    }
    mask = ~(tlb->size - 1);
    LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx
              " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN,
              mask, (uint32_t)tlb->PID, tlb->prot);
    /* Check PID */
    if (tlb->PID != 0 && tlb->PID != pid) {
        return -1;
    }
    /* Check effective address */
    if ((address & mask) != tlb->EPN) {
        return -1;
    }
    *raddrp = (tlb->RPN & mask) | (address & ~mask);
    if (ext) {
        /* Extend the physical address to 36 bits */
        *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32;
    }

    return 0;
}
/* Generic TLB search function for PowerPC embedded implementations */
static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address,
                             uint32_t pid)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i, ret;

    /* Default return value is no match */
    ret = -1;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
            ret = i;
            break;
        }
    }

    return ret;
}
/* Helpers specific to PowerPC 40x implementations */
static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
{
    ppcemb_tlb_t *tlb;
    int i;

    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        tlb->prot &= ~PAGE_VALID;
    }
    tlb_flush(env, 1);
}

static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState *env,
                                              target_ulong eaddr, uint32_t pid)
{
#if !defined(FLUSH_ALL_TLBS)
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    target_ulong page, end;
    int i;

    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, eaddr, pid, 0, i) == 0) {
            end = tlb->EPN + tlb->size;
            for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
                tlb_flush_page(env, page);
            }
            tlb->prot &= ~PAGE_VALID;
            break;
        }
    }
#else
    ppc4xx_tlb_invalidate_all(env);
#endif
}
static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                       target_ulong address, int rw,
                                       int access_type)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i, ret, zsel, zpr, pr;

    ret = -1;
    raddr = (hwaddr)-1ULL;
    pr = msr_pr;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, address,
                             env->spr[SPR_40x_PID], 0, i) < 0) {
            continue;
        }
        zsel = (tlb->attr >> 4) & 0xF;
        zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
        LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
                  __func__, i, zsel, zpr, rw, tlb->attr);
        /* Check execute enable bit */
        switch (zpr) {
        case 0x2:
            if (pr != 0) {
                goto check_perms;
            }
            /* No break here */
        case 0x3:
            /* All accesses granted */
            ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            ret = 0;
            break;
        case 0x0:
            if (pr != 0) {
                /* Raise Zone protection fault. */
                env->spr[SPR_40x_ESR] = 1 << 22;
                ctx->prot = 0;
                ret = -2;
                break;
            }
            /* No break here */
        case 0x1:
        check_perms:
            /* Check from TLB entry */
            ctx->prot = tlb->prot;
            ret = check_prot(ctx->prot, rw, access_type);
            if (ret == -2) {
                env->spr[SPR_40x_ESR] = 0;
            }
            break;
        }
        if (ret >= 0) {
            ctx->raddr = raddr;
            LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
                      " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
                      ret);
            return 0;
        }
    }
    LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
              " %d %d\n", __func__, address, raddr, ctx->prot, ret);

    return ret;
}
void store_40x_sler(CPUPPCState *env, uint32_t val)
{
    /* XXX: TO BE FIXED */
    if (val != 0x00000000) {
        cpu_abort(env, "Little-endian regions are not supported by now\n");
    }
    env->spr[SPR_405_SLER] = val;
}
static inline int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
                                     hwaddr *raddr, int *prot,
                                     target_ulong address, int rw,
                                     int access_type, int i)
{
    int ret, prot2;

    if (ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID],
                         !env->nb_pids, i) >= 0) {
        goto found_tlb;
    }

    if (env->spr[SPR_BOOKE_PID1] &&
        ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID1], 0, i) >= 0) {
        goto found_tlb;
    }

    if (env->spr[SPR_BOOKE_PID2] &&
        ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID2], 0, i) >= 0) {
        goto found_tlb;
    }

    LOG_SWTLB("%s: TLB entry not found\n", __func__);
    return -1;

found_tlb:

    if (msr_pr != 0) {
        prot2 = tlb->prot & 0xF;
    } else {
        prot2 = (tlb->prot >> 4) & 0xF;
    }

    /* Check the address space */
    if (access_type == ACCESS_CODE) {
        if (msr_ir != (tlb->attr & 1)) {
            LOG_SWTLB("%s: AS doesn't match\n", __func__);
            return -1;
        }

        *prot = prot2;
        if (prot2 & PAGE_EXEC) {
            LOG_SWTLB("%s: good TLB!\n", __func__);
            return 0;
        }

        LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2);
        ret = -3;
    } else {
        if (msr_dr != (tlb->attr & 1)) {
            LOG_SWTLB("%s: AS doesn't match\n", __func__);
            return -1;
        }

        *prot = prot2;
        if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) {
            LOG_SWTLB("%s: found TLB!\n", __func__);
            return 0;
        }

        LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2);
        ret = -2;
    }

    return ret;
}
static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                         target_ulong address, int rw,
                                         int access_type)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i, ret;

    ret = -1;
    raddr = (hwaddr)-1ULL;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw,
                                 access_type, i);
        if (!ret) {
            break;
        }
    }

    if (ret >= 0) {
        ctx->raddr = raddr;
        LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
                  ret);
    } else {
        LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, raddr, ctx->prot, ret);
    }

    return ret;
}
void booke206_flush_tlb(CPUPPCState *env, int flags, const int check_iprot)
{
    int tlb_size;
    int i, j;
    ppcmas_tlb_t *tlb = env->tlb.tlbm;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        if (flags & (1 << i)) {
            tlb_size = booke206_tlb_size(env, i);
            for (j = 0; j < tlb_size; j++) {
                if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
                    tlb[j].mas1 &= ~MAS1_VALID;
                }
            }
        }
        tlb += booke206_tlb_size(env, i);
    }

    tlb_flush(env, 1);
}
booke206_tlb_to_page_size(CPUPPCState
*env
,
1305 tlbm_size
= (tlb
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
1307 return 1024ULL << tlbm_size
;
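
/*
 * With this encoding the page size is 1KB << TSIZE, e.g. TSIZE = 2 gives
 * 4KB and TSIZE = 10 gives 1MB, matching the book3e_tsize_to_str[] table
 * used by the dump code below.
 */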
/* TLB check function for MAS based SoftTLBs */
int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
                     hwaddr *raddrp,
                     target_ulong address, uint32_t pid)
{
    target_ulong mask;
    uint32_t tlb_pid;

    /* Check valid flag */
    if (!(tlb->mas1 & MAS1_VALID)) {
        return -1;
    }

    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%"
              PRIx64 " mask=0x" TARGET_FMT_lx " MAS7_3=0x%" PRIx64 " MAS8=%x\n",
              __func__, address, pid, tlb->mas1, tlb->mas2, mask, tlb->mas7_3,
              tlb->mas8);

    /* Check PID */
    tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
    if (tlb_pid != 0 && tlb_pid != pid) {
        return -1;
    }

    /* Check effective address */
    if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
        return -1;
    }

    *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);

    return 0;
}
static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
                                 hwaddr *raddr, int *prot,
                                 target_ulong address, int rw,
                                 int access_type)
{
    int ret;
    int prot2 = 0;

    if (ppcmas_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID]) >= 0) {
        goto found_tlb;
    }

    if (env->spr[SPR_BOOKE_PID1] &&
        ppcmas_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID1]) >= 0) {
        goto found_tlb;
    }

    if (env->spr[SPR_BOOKE_PID2] &&
        ppcmas_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID2]) >= 0) {
        goto found_tlb;
    }

    LOG_SWTLB("%s: TLB entry not found\n", __func__);
    return -1;

found_tlb:

    if (msr_pr != 0) {
        if (tlb->mas7_3 & MAS3_UR) {
            prot2 |= PAGE_READ;
        }
        if (tlb->mas7_3 & MAS3_UW) {
            prot2 |= PAGE_WRITE;
        }
        if (tlb->mas7_3 & MAS3_UX) {
            prot2 |= PAGE_EXEC;
        }
    } else {
        if (tlb->mas7_3 & MAS3_SR) {
            prot2 |= PAGE_READ;
        }
        if (tlb->mas7_3 & MAS3_SW) {
            prot2 |= PAGE_WRITE;
        }
        if (tlb->mas7_3 & MAS3_SX) {
            prot2 |= PAGE_EXEC;
        }
    }

    /* Check the address space and permissions */
    if (access_type == ACCESS_CODE) {
        if (msr_ir != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
            LOG_SWTLB("%s: AS doesn't match\n", __func__);
            return -1;
        }

        *prot = prot2;
        if (prot2 & PAGE_EXEC) {
            LOG_SWTLB("%s: good TLB!\n", __func__);
            return 0;
        }

        LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2);
        ret = -3;
    } else {
        if (msr_dr != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
            LOG_SWTLB("%s: AS doesn't match\n", __func__);
            return -1;
        }

        *prot = prot2;
        if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) {
            LOG_SWTLB("%s: found TLB!\n", __func__);
            return 0;
        }

        LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2);
        ret = -2;
    }

    return ret;
}
static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                            target_ulong address, int rw,
                                            int access_type)
{
    ppcmas_tlb_t *tlb;
    hwaddr raddr;
    int i, j, ret;

    ret = -1;
    raddr = (hwaddr)-1ULL;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
                                        rw, access_type);
            if (ret != -1) {
                goto found_tlb;
            }
        }
    }

found_tlb:

    if (ret >= 0) {
        ctx->raddr = raddr;
        LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
                  ret);
    } else {
        LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, raddr, ctx->prot, ret);
    }

    return ret;
}
static const char *book3e_tsize_to_str[32] = {
    "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
    "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
    "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
    "1T", "2T"
};
static void mmubooke_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
                              CPUPPCState *env)
{
    ppcemb_tlb_t *entry;
    int i;

    if (kvm_enabled() && !env->kvm_sw_tlb) {
        cpu_fprintf(f, "Cannot access KVM TLB\n");
        return;
    }

    cpu_fprintf(f, "\nTLB:\n");
    cpu_fprintf(f, "Effective          Physical           Size PID   Prot     "
                "Attr\n");

    entry = &env->tlb.tlbe[0];
    for (i = 0; i < env->nb_tlb; i++, entry++) {
        hwaddr ea, pa;
        target_ulong mask;
        uint64_t size = (uint64_t)entry->size;
        char size_buf[20];

        /* Check valid flag */
        if (!(entry->prot & PAGE_VALID)) {
            continue;
        }

        mask = ~(entry->size - 1);
        ea = entry->EPN & mask;
        pa = entry->RPN & mask;
        /* Extend the physical address to 36 bits */
        pa |= (hwaddr)(entry->RPN & 0xF) << 32;
        size /= 1024;
        if (size >= 1024) {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / 1024);
        } else {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size);
        }
        cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
                    (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
                    entry->prot, entry->attr);
    }
}
static void mmubooke206_dump_one_tlb(FILE *f, fprintf_function cpu_fprintf,
                                     CPUPPCState *env, int tlbn, int offset,
                                     int tlbsize)
{
    ppcmas_tlb_t *entry;
    int i;

    cpu_fprintf(f, "\nTLB%d:\n", tlbn);
    cpu_fprintf(f, "Effective          Physical           Size TID   TS SRWX"
                " URWX WIMGE U0123\n");

    entry = &env->tlb.tlbm[offset];
    for (i = 0; i < tlbsize; i++, entry++) {
        hwaddr ea, pa, size;
        int tsize;

        if (!(entry->mas1 & MAS1_VALID)) {
            continue;
        }

        tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
        size = 1024ULL << tsize;
        ea = entry->mas2 & ~(size - 1);
        pa = entry->mas7_3 & ~(size - 1);

        cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u  S%c%c%c"
                    "U%c%c%c %c%c%c%c%c  U%c%c%c%c\n",
                    (uint64_t)ea, (uint64_t)pa,
                    book3e_tsize_to_str[tsize],
                    (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
                    (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
                    entry->mas7_3 & MAS3_SR ? 'R' : '-',
                    entry->mas7_3 & MAS3_SW ? 'W' : '-',
                    entry->mas7_3 & MAS3_SX ? 'X' : '-',
                    entry->mas7_3 & MAS3_UR ? 'R' : '-',
                    entry->mas7_3 & MAS3_UW ? 'W' : '-',
                    entry->mas7_3 & MAS3_UX ? 'X' : '-',
                    entry->mas2 & MAS2_W ? 'W' : '-',
                    entry->mas2 & MAS2_I ? 'I' : '-',
                    entry->mas2 & MAS2_M ? 'M' : '-',
                    entry->mas2 & MAS2_G ? 'G' : '-',
                    entry->mas2 & MAS2_E ? 'E' : '-',
                    entry->mas7_3 & MAS3_U0 ? '0' : '-',
                    entry->mas7_3 & MAS3_U1 ? '1' : '-',
                    entry->mas7_3 & MAS3_U2 ? '2' : '-',
                    entry->mas7_3 & MAS3_U3 ? '3' : '-');
    }
}
static void mmubooke206_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
                                 CPUPPCState *env)
{
    int offset = 0;
    int i;

    if (kvm_enabled() && !env->kvm_sw_tlb) {
        cpu_fprintf(f, "Cannot access KVM TLB\n");
        return;
    }

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int size = booke206_tlb_size(env, i);

        if (size == 0) {
            continue;
        }

        mmubooke206_dump_one_tlb(f, cpu_fprintf, env, i, offset, size);
        offset += size;
    }
}
1600 static void mmubooks_dump_mmu(FILE *f
, fprintf_function cpu_fprintf
,
1604 uint64_t slbe
, slbv
;
1606 cpu_synchronize_state(env
);
1608 cpu_fprintf(f
, "SLB\tESID\t\t\tVSID\n");
1609 for (i
= 0; i
< env
->slb_nr
; i
++) {
1610 slbe
= env
->slb
[i
].esid
;
1611 slbv
= env
->slb
[i
].vsid
;
1612 if (slbe
== 0 && slbv
== 0) {
1615 cpu_fprintf(f
, "%d\t0x%016" PRIx64
"\t0x%016" PRIx64
"\n",
1621 void dump_mmu(FILE *f
, fprintf_function cpu_fprintf
, CPUPPCState
*env
)
1623 switch (env
->mmu_model
) {
1624 case POWERPC_MMU_BOOKE
:
1625 mmubooke_dump_mmu(f
, cpu_fprintf
, env
);
1627 case POWERPC_MMU_BOOKE206
:
1628 mmubooke206_dump_mmu(f
, cpu_fprintf
, env
);
1630 #if defined(TARGET_PPC64)
1631 case POWERPC_MMU_64B
:
1632 case POWERPC_MMU_2_06
:
1633 case POWERPC_MMU_2_06d
:
1634 mmubooks_dump_mmu(f
, cpu_fprintf
, env
);
1638 qemu_log_mask(LOG_UNIMP
, "%s: unimplemented\n", __func__
);
static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx,
                                 target_ulong eaddr, int rw)
{
    int in_plb, ret;

    ctx->raddr = eaddr;
    ctx->prot = PAGE_READ | PAGE_EXEC;
    ret = 0;
    switch (env->mmu_model) {
    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_REAL:
    case POWERPC_MMU_BOOKE:
        ctx->prot |= PAGE_WRITE;
        break;
#if defined(TARGET_PPC64)
    case POWERPC_MMU_620:
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_06d:
        /* Real addresses are 60 bits long */
        ctx->raddr &= 0x0FFFFFFFFFFFFFFFULL;
        ctx->prot |= PAGE_WRITE;
        break;
#endif
    case POWERPC_MMU_SOFT_4xx_Z:
        if (unlikely(msr_pe != 0)) {
            /* The 403 family adds some particular protections,
             * using PBL/PBU registers for accesses with no translation.
             */
            in_plb =
                /* Check PLB validity */
                (env->pb[0] < env->pb[1] &&
                 /* and address in plb area */
                 eaddr >= env->pb[0] && eaddr < env->pb[1]) ||
                (env->pb[2] < env->pb[3] &&
                 eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
            if (in_plb ^ msr_px) {
                /* Access in protected area */
                if (rw == 1) {
                    /* Access is not allowed */
                    ret = -2;
                }
            } else {
                /* Read-write access is allowed */
                ctx->prot |= PAGE_WRITE;
            }
        }
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env, "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE206:
        cpu_abort(env, "BookE 2.06 MMU doesn't have physical real mode\n");
        break;
    default:
        cpu_abort(env, "Unknown or invalid MMU model\n");
        return -1;
    }

    return ret;
}
int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr,
                         int rw, int access_type)
{
    int ret = -1;

#if 0
    qemu_log("%s\n", __func__);
#endif
    if ((access_type == ACCESS_CODE && msr_ir == 0) ||
        (access_type != ACCESS_CODE && msr_dr == 0)) {
        if (env->mmu_model == POWERPC_MMU_BOOKE) {
            /* The BookE MMU always performs address translation. The
               IS and DS bits only affect the address space. */
            ret = mmubooke_get_physical_address(env, ctx, eaddr,
                                                rw, access_type);
        } else if (env->mmu_model == POWERPC_MMU_BOOKE206) {
            ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw,
                                                   access_type);
        } else {
            /* No address translation. */
            ret = check_physical(env, ctx, eaddr, rw);
        }
    } else {
        switch (env->mmu_model) {
        case POWERPC_MMU_32B:
        case POWERPC_MMU_601:
        case POWERPC_MMU_SOFT_6xx:
        case POWERPC_MMU_SOFT_74xx:
            /* Try to find a BAT */
            if (env->nb_BATs != 0) {
                ret = get_bat(env, ctx, eaddr, rw, access_type);
            }
#if defined(TARGET_PPC64)
        case POWERPC_MMU_620:
        case POWERPC_MMU_64B:
        case POWERPC_MMU_2_06:
        case POWERPC_MMU_2_06d:
#endif
            if (ret < 0) {
                /* We didn't match any BAT entry or don't have BATs */
                ret = get_segment(env, ctx, eaddr, rw, access_type);
            }
            break;
        case POWERPC_MMU_SOFT_4xx:
        case POWERPC_MMU_SOFT_4xx_Z:
            ret = mmu40x_get_physical_address(env, ctx, eaddr,
                                              rw, access_type);
            break;
        case POWERPC_MMU_BOOKE:
            ret = mmubooke_get_physical_address(env, ctx, eaddr,
                                                rw, access_type);
            break;
        case POWERPC_MMU_BOOKE206:
            ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw,
                                                   access_type);
            break;
        case POWERPC_MMU_MPC8xx:
            /* XXX: TODO */
            cpu_abort(env, "MPC8xx MMU model is not implemented\n");
            break;
        case POWERPC_MMU_REAL:
            cpu_abort(env, "PowerPC in real mode does not do any translation\n");
            return -1;
        default:
            cpu_abort(env, "Unknown or invalid MMU model\n");
            return -1;
        }
    }
#if 0
    qemu_log("%s address " TARGET_FMT_lx " => %d " TARGET_FMT_plx "\n",
             __func__, eaddr, ret, ctx->raddr);
#endif

    return ret;
}
hwaddr cpu_get_phys_page_debug(CPUPPCState *env, target_ulong addr)
{
    mmu_ctx_t ctx;

    if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) {
        return -1;
    }

    return ctx.raddr & TARGET_PAGE_MASK;
}
static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
                                         int rw)
{
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS6] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    /* AS */
    if (((rw == 2) && msr_ir) || ((rw != 2) && msr_dr)) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
        env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;
    }

    env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
    env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;

    switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
    case MAS4_TIDSELD_PID0:
        env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID] << MAS1_TID_SHIFT;
        break;
    case MAS4_TIDSELD_PID1:
        env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID1] << MAS1_TID_SHIFT;
        break;
    case MAS4_TIDSELD_PID2:
        env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID2] << MAS1_TID_SHIFT;
        break;
    }

    env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
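
/*
 * On a BookE 2.06 TLB miss the MAS registers are pre-seeded from the MAS4
 * defaults plus the faulting address, address space and PID as above, so
 * the guest miss handler typically only has to fill in the RPN and
 * permissions before executing tlbwe.
 */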
/* Perform address translation */
int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw,
                             int mmu_idx)
{
    mmu_ctx_t ctx;
    int access_type;
    int ret = 0;

    if (rw == 2) {
        /* code access */
        rw = 0;
        access_type = ACCESS_CODE;
    } else {
        /* data access */
        access_type = env->access_type;
    }
    ret = get_physical_address(env, &ctx, address, rw, access_type);
    if (ret == 0) {
        tlb_set_page(env, address & TARGET_PAGE_MASK,
                     ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        ret = 0;
    } else if (ret < 0) {
        LOG_MMU_STATE(env);
        if (access_type == ACCESS_CODE) {
            switch (ret) {
            case -1:
                /* No matches in page tables or TLB */
                switch (env->mmu_model) {
                case POWERPC_MMU_SOFT_6xx:
                    env->exception_index = POWERPC_EXCP_IFTLB;
                    env->error_code = 1 << 18;
                    env->spr[SPR_IMISS] = address;
                    env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
                    goto tlb_miss;
                case POWERPC_MMU_SOFT_74xx:
                    env->exception_index = POWERPC_EXCP_IFTLB;
                    goto tlb_miss_74xx;
                case POWERPC_MMU_SOFT_4xx:
                case POWERPC_MMU_SOFT_4xx_Z:
                    env->exception_index = POWERPC_EXCP_ITLB;
                    env->error_code = 0;
                    env->spr[SPR_40x_DEAR] = address;
                    env->spr[SPR_40x_ESR] = 0x00000000;
                    break;
                case POWERPC_MMU_32B:
                case POWERPC_MMU_601:
#if defined(TARGET_PPC64)
                case POWERPC_MMU_620:
                case POWERPC_MMU_64B:
                case POWERPC_MMU_2_06:
                case POWERPC_MMU_2_06d:
#endif
                    env->exception_index = POWERPC_EXCP_ISI;
                    env->error_code = 0x40000000;
                    break;
                case POWERPC_MMU_BOOKE206:
                    booke206_update_mas_tlb_miss(env, address, rw);
                    /* fall through */
                case POWERPC_MMU_BOOKE:
                    env->exception_index = POWERPC_EXCP_ITLB;
                    env->error_code = 0;
                    env->spr[SPR_BOOKE_DEAR] = address;
                    return -1;
                case POWERPC_MMU_MPC8xx:
                    /* XXX: TODO */
                    cpu_abort(env, "MPC8xx MMU model is not implemented\n");
                    break;
                case POWERPC_MMU_REAL:
                    cpu_abort(env, "PowerPC in real mode should never raise "
                              "any MMU exceptions\n");
                    return -1;
                default:
                    cpu_abort(env, "Unknown or invalid MMU model\n");
                    return -1;
                }
                break;
            case -2:
                /* Access rights violation */
                env->exception_index = POWERPC_EXCP_ISI;
                env->error_code = 0x08000000;
                break;
            case -3:
                /* No execute protection violation */
                if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                    (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->spr[SPR_BOOKE_ESR] = 0x00000000;
                }
                env->exception_index = POWERPC_EXCP_ISI;
                env->error_code = 0x10000000;
                break;
            case -4:
                /* Direct store exception */
                /* No code fetch is allowed in direct-store areas */
                env->exception_index = POWERPC_EXCP_ISI;
                env->error_code = 0x10000000;
                break;
#if defined(TARGET_PPC64)
            case -5:
                /* No match in segment table */
                if (env->mmu_model == POWERPC_MMU_620) {
                    env->exception_index = POWERPC_EXCP_ISI;
                    /* XXX: this might be incorrect */
                    env->error_code = 0x40000000;
                } else {
                    env->exception_index = POWERPC_EXCP_ISEG;
                    env->error_code = 0;
                }
                break;
#endif
            }
        } else {
            switch (ret) {
            case -1:
                /* No matches in page tables or TLB */
                switch (env->mmu_model) {
                case POWERPC_MMU_SOFT_6xx:
                    if (rw == 1) {
                        env->exception_index = POWERPC_EXCP_DSTLB;
                        env->error_code = 1 << 16;
                    } else {
                        env->exception_index = POWERPC_EXCP_DLTLB;
                        env->error_code = 0;
                    }
                    env->spr[SPR_DMISS] = address;
                    env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
                tlb_miss:
                    env->error_code |= ctx.key << 19;
                    env->spr[SPR_HASH1] = env->htab_base +
                        get_pteg_offset(env, ctx.hash[0], HASH_PTE_SIZE_32);
                    env->spr[SPR_HASH2] = env->htab_base +
                        get_pteg_offset(env, ctx.hash[1], HASH_PTE_SIZE_32);
                    break;
                case POWERPC_MMU_SOFT_74xx:
                    if (rw == 1) {
                        env->exception_index = POWERPC_EXCP_DSTLB;
                    } else {
                        env->exception_index = POWERPC_EXCP_DLTLB;
                    }
                tlb_miss_74xx:
                    /* Implement LRU algorithm */
                    env->error_code = ctx.key << 19;
                    env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) |
                        ((env->last_way + 1) & (env->nb_ways - 1));
                    env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem;
                    break;
                case POWERPC_MMU_SOFT_4xx:
                case POWERPC_MMU_SOFT_4xx_Z:
                    env->exception_index = POWERPC_EXCP_DTLB;
                    env->error_code = 0;
                    env->spr[SPR_40x_DEAR] = address;
                    if (rw) {
                        env->spr[SPR_40x_ESR] = 0x00800000;
                    } else {
                        env->spr[SPR_40x_ESR] = 0x00000000;
                    }
                    break;
                case POWERPC_MMU_32B:
                case POWERPC_MMU_601:
#if defined(TARGET_PPC64)
                case POWERPC_MMU_620:
                case POWERPC_MMU_64B:
                case POWERPC_MMU_2_06:
                case POWERPC_MMU_2_06d:
#endif
                    env->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = address;
                    if (rw == 1) {
                        env->spr[SPR_DSISR] = 0x42000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x40000000;
                    }
                    break;
                case POWERPC_MMU_MPC8xx:
                    /* XXX: TODO */
                    cpu_abort(env, "MPC8xx MMU model is not implemented\n");
                    break;
                case POWERPC_MMU_BOOKE206:
                    booke206_update_mas_tlb_miss(env, address, rw);
                    /* fall through */
                case POWERPC_MMU_BOOKE:
                    env->exception_index = POWERPC_EXCP_DTLB;
                    env->error_code = 0;
                    env->spr[SPR_BOOKE_DEAR] = address;
                    env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0;
                    return -1;
                case POWERPC_MMU_REAL:
                    cpu_abort(env, "PowerPC in real mode should never raise "
                              "any MMU exceptions\n");
                    return -1;
                default:
                    cpu_abort(env, "Unknown or invalid MMU model\n");
                    return -1;
                }
                break;
            case -2:
                /* Access rights violation */
                env->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                if (env->mmu_model == POWERPC_MMU_SOFT_4xx
                    || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) {
                    env->spr[SPR_40x_DEAR] = address;
                    if (rw) {
                        env->spr[SPR_40x_ESR] |= 0x00800000;
                    }
                } else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                           (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->spr[SPR_BOOKE_DEAR] = address;
                    env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0;
                } else {
                    env->spr[SPR_DAR] = address;
                    if (rw == 1) {
                        env->spr[SPR_DSISR] = 0x0A000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x08000000;
                    }
                }
                break;
            case -4:
                /* Direct store exception */
                switch (access_type) {
                case ACCESS_FLOAT:
                    /* Floating point load/store */
                    env->exception_index = POWERPC_EXCP_ALIGN;
                    env->error_code = POWERPC_EXCP_ALIGN_FP;
                    env->spr[SPR_DAR] = address;
                    break;
                case ACCESS_RES:
                    /* lwarx, ldarx or stwcx. */
                    env->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = address;
                    if (rw == 1) {
                        env->spr[SPR_DSISR] = 0x06000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x04000000;
                    }
                    break;
                case ACCESS_EXT:
                    /* eciwx or ecowx */
                    env->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = address;
                    if (rw == 1) {
                        env->spr[SPR_DSISR] = 0x06100000;
                    } else {
                        env->spr[SPR_DSISR] = 0x04100000;
                    }
                    break;
                default:
                    printf("DSI: invalid exception (%d)\n", ret);
                    env->exception_index = POWERPC_EXCP_PROGRAM;
                    env->error_code =
                        POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
                    env->spr[SPR_DAR] = address;
                    break;
                }
                break;
#if defined(TARGET_PPC64)
            case -5:
                /* No match in segment table */
                if (env->mmu_model == POWERPC_MMU_620) {
                    env->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = address;
                    /* XXX: this might be incorrect */
                    if (rw == 1) {
                        env->spr[SPR_DSISR] = 0x42000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x40000000;
                    }
                } else {
                    env->exception_index = POWERPC_EXCP_DSEG;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = address;
                }
                break;
#endif
            }
        }
#if 0
        printf("%s: set exception to %d %02x\n", __func__,
               env->exception_index, env->error_code);
#endif
        ret = 1;
    }

    return ret;
}
/*****************************************************************************/
/* BATs management */
#if !defined(FLUSH_ALL_TLBS)
static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
                                     target_ulong mask)
{
    target_ulong base, end, page;

    base = BATu & ~0x0001FFFF;
    end = base + mask + 0x00020000;
    LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
             TARGET_FMT_lx ")\n", base, end, mask);
    for (page = base; page != end; page += TARGET_PAGE_SIZE) {
        tlb_flush_page(env, page);
    }
    LOG_BATS("Flush done\n");
}
#endif
static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
                                  target_ulong value)
{
    LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID,
             nr, ul == 0 ? 'u' : 'l', value, env->nip);
}
void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'I', 0, nr, value);
    if (env->IBAT[0][nr] != value) {
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#endif
        /* When storing valid upper BAT, mask BEPI and BRPN
         * and invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;
        env->IBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
            (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
        tlb_flush(env, 1);
#endif
    }
}
void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    dump_store_bat(env, 'I', 1, nr, value);
    env->IBAT[1][nr] = value;
}
void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'D', 0, nr, value);
    if (env->DBAT[0][nr] != value) {
        /* When storing valid upper BAT, mask BEPI and BRPN
         * and invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#endif
        mask = (value << 15) & 0x0FFE0000UL;
        env->DBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
            (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#else
        tlb_flush(env, 1);
#endif
    }
}
void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    dump_store_bat(env, 'D', 1, nr, value);
    env->DBAT[1][nr] = value;
}
void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;
#if defined(FLUSH_ALL_TLBS)
    int do_inval;
#endif

    dump_store_bat(env, 'I', 0, nr, value);
    if (env->IBAT[0][nr] != value) {
#if defined(FLUSH_ALL_TLBS)
        do_inval = 0;
#endif
        mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
        if (env->IBAT[1][nr] & 0x40) {
            /* Invalidate BAT only if it is valid */
#if !defined(FLUSH_ALL_TLBS)
            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
            do_inval = 1;
#endif
        }
        /* When storing valid upper BAT, mask BEPI and BRPN
         * and invalidate all TLBs covered by this BAT
         */
        env->IBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->DBAT[0][nr] = env->IBAT[0][nr];
        if (env->IBAT[1][nr] & 0x40) {
#if !defined(FLUSH_ALL_TLBS)
            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
            do_inval = 1;
#endif
        }
#if defined(FLUSH_ALL_TLBS)
        if (do_inval) {
            tlb_flush(env, 1);
        }
#endif
    }
}
void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;
#if defined(FLUSH_ALL_TLBS)
    int do_inval;
#endif

    dump_store_bat(env, 'I', 1, nr, value);
    if (env->IBAT[1][nr] != value) {
#if defined(FLUSH_ALL_TLBS)
        do_inval = 0;
#endif
        if (env->IBAT[1][nr] & 0x40) {
#if !defined(FLUSH_ALL_TLBS)
            mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
            do_inval = 1;
#endif
        }
        if (value & 0x40) {
#if !defined(FLUSH_ALL_TLBS)
            mask = (value << 17) & 0x0FFE0000UL;
            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
            do_inval = 1;
#endif
        }
        env->IBAT[1][nr] = value;
        env->DBAT[1][nr] = value;
#if defined(FLUSH_ALL_TLBS)
        if (do_inval) {
            tlb_flush(env, 1);
        }
#endif
    }
}
/*****************************************************************************/
/* TLB management */
void ppc_tlb_invalidate_all(CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
        ppc6xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_SOFT_4xx_Z:
        ppc4xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_REAL:
        cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env, "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE:
        tlb_flush(env, 1);
        break;
    case POWERPC_MMU_BOOKE206:
        booke206_flush_tlb(env, -1, 0);
        break;
    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
#if defined(TARGET_PPC64)
    case POWERPC_MMU_620:
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_06d:
#endif /* defined(TARGET_PPC64) */
        tlb_flush(env, 1);
        break;
    default:
        /* XXX: TODO */
        cpu_abort(env, "Unknown MMU model\n");
        break;
    }
}
void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
{
#if !defined(FLUSH_ALL_TLBS)
    addr &= TARGET_PAGE_MASK;
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
        ppc6xx_tlb_invalidate_virt(env, addr, 0);
        if (env->id_tlbs == 1) {
            ppc6xx_tlb_invalidate_virt(env, addr, 1);
        }
        break;
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_SOFT_4xx_Z:
        ppc4xx_tlb_invalidate_virt(env, addr, env->spr[SPR_40x_PID]);
        break;
    case POWERPC_MMU_REAL:
        cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env, "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE:
        /* XXX: TODO */
        cpu_abort(env, "BookE MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE206:
        /* XXX: TODO */
        cpu_abort(env, "BookE 2.06 MMU model is not implemented\n");
        break;
    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
        /* tlbie invalidates TLBs for all segments */
        addr &= ~((target_ulong)-1ULL << 28);
        /* XXX: this case should be optimized,
         * giving a mask to tlb_flush_page
         */
        tlb_flush_page(env, addr | (0x0 << 28));
        tlb_flush_page(env, addr | (0x1 << 28));
        tlb_flush_page(env, addr | (0x2 << 28));
        tlb_flush_page(env, addr | (0x3 << 28));
        tlb_flush_page(env, addr | (0x4 << 28));
        tlb_flush_page(env, addr | (0x5 << 28));
        tlb_flush_page(env, addr | (0x6 << 28));
        tlb_flush_page(env, addr | (0x7 << 28));
        tlb_flush_page(env, addr | (0x8 << 28));
        tlb_flush_page(env, addr | (0x9 << 28));
        tlb_flush_page(env, addr | (0xA << 28));
        tlb_flush_page(env, addr | (0xB << 28));
        tlb_flush_page(env, addr | (0xC << 28));
        tlb_flush_page(env, addr | (0xD << 28));
        tlb_flush_page(env, addr | (0xE << 28));
        tlb_flush_page(env, addr | (0xF << 28));
        break;
#if defined(TARGET_PPC64)
    case POWERPC_MMU_620:
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_06d:
        /* tlbie invalidates TLBs for all segments */
        /* XXX: given the fact that there are too many segments to invalidate,
         *      and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
         *      we just invalidate all TLBs
         */
        tlb_flush(env, 1);
        break;
#endif /* defined(TARGET_PPC64) */
    default:
        /* XXX: TODO */
        cpu_abort(env, "Unknown MMU model\n");
        break;
    }
#else
    ppc_tlb_invalidate_all(env);
#endif
}
/*****************************************************************************/
/* Special registers manipulation */
#if defined(TARGET_PPC64)
void ppc_store_asr(CPUPPCState *env, target_ulong value)
{
    if (env->asr != value) {
        env->asr = value;
        tlb_flush(env, 1);
    }
}
#endif
void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
    LOG_MMU("%s: " TARGET_FMT_lx "\n", __func__, value);
    if (env->spr[SPR_SDR1] != value) {
        env->spr[SPR_SDR1] = value;
#if defined(TARGET_PPC64)
        if (env->mmu_model & POWERPC_MMU_64) {
            target_ulong htabsize = value & SDR_64_HTABSIZE;

            if (htabsize > 28) {
                fprintf(stderr, "Invalid HTABSIZE 0x" TARGET_FMT_lx
                        " stored in SDR1\n", htabsize);
                htabsize = 28;
            }
            env->htab_mask = (1ULL << (htabsize + 18)) - 1;
            env->htab_base = value & SDR_64_HTABORG;
        } else
#endif /* defined(TARGET_PPC64) */
        {
            /* FIXME: Should check for valid HTABMASK values */
            env->htab_mask = ((value & SDR_32_HTABMASK) << 16) | 0xFFFF;
            env->htab_base = value & SDR_32_HTABORG;
        }
        tlb_flush(env, 1);
    }
}
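
/*
 * Example of the 32-bit decoding above, assuming the usual SDR1 field
 * layout (HTABORG in the high 16 bits, HTABMASK in the low 9 bits):
 * SDR1 = 0x00FF00FF yields htab_base = 0x00FF0000 and
 * htab_mask = 0x00FFFFFF, i.e. a 16MB hash table.
 */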
/* Segment registers load and store */
target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        /* XXX */
        return 0;
    }
#endif
    return env->sr[sr_num];
}
*env
, target_ulong srnum
, target_ulong value
)
2472 LOG_MMU("%s: reg=%d " TARGET_FMT_lx
" " TARGET_FMT_lx
"\n", __func__
,
2473 (int)srnum
, value
, env
->sr
[srnum
]);
2474 #if defined(TARGET_PPC64)
2475 if (env
->mmu_model
& POWERPC_MMU_64
) {
2476 uint64_t rb
= 0, rs
= 0;
2479 rb
|= ((uint32_t)srnum
& 0xf) << 28;
2480 /* Set the valid bit */
2483 rb
|= (uint32_t)srnum
;
2486 rs
|= (value
& 0xfffffff) << 12;
2488 rs
|= ((value
>> 27) & 0xf) << 8;
2490 ppc_store_slb(env
, rb
, rs
);
2493 if (env
->sr
[srnum
] != value
) {
2494 env
->sr
[srnum
] = value
;
2495 /* Invalidating 256MB of virtual memory in 4kB pages is way longer than
2496 flusing the whole TLB. */
2497 #if !defined(FLUSH_ALL_TLBS) && 0
2499 target_ulong page
, end
;
2500 /* Invalidate 256 MB of virtual memory */
2501 page
= (16 << 20) * srnum
;
2502 end
= page
+ (16 << 20);
2503 for (; page
!= end
; page
+= TARGET_PAGE_SIZE
) {
2504 tlb_flush_page(env
, page
);
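/*
 * On 64-bit hash MMUs the 32-bit mtsr/mtsrin path above is bridged onto the
 * SLB: an slbmte-style operand pair is packed and handed to ppc_store_slb(),
 * so segment handling goes through the common SLB code.  A sketch of that
 * packing (illustration only; it mirrors the field layout used above):
 */
#if 0
static void sr_to_slb_operands(uint32_t srnum, target_ulong value,
                               uint64_t *rb, uint64_t *rs)
{
    *rb = ((uint64_t)(srnum & 0xf) << 28)        /* ESID = SR number */
        | (1 << 27)                              /* valid bit */
        | srnum;                                 /* SLB slot index */
    *rs = ((uint64_t)(value & 0xfffffff) << 12)  /* VSID */
        | (((value >> 27) & 0xf) << 8);          /* segment flag bits */
}
#endif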
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)
/* SLB management */
#if defined(TARGET_PPC64)
void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    if (ppc_store_slb(env, rb, rs) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}

target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    target_ulong rt = 0;

    if (ppc_load_slb_esid(env, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}
#endif /* defined(TARGET_PPC64) */
/* TLB management */
void helper_tlbia(CPUPPCState *env)
{
    ppc_tlb_invalidate_all(env);
}

void helper_tlbie(CPUPPCState *env, target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 0);
}

void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 1);
}
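/*
 * Background for do_6xx_tlb(): on 602/603-style software TLB reloads the
 * hardware leaves the missing effective address in IMISS/DMISS, the PTE
 * compare value in ICMP/DCMP and the real page number in RPA, and the way
 * to replace is taken from SRR1 (bit 17 here).  The helper simply repacks
 * those SPRs and stores the entry with ppc6xx_tlb_store(), selecting the
 * instruction or data TLB through is_code.
 */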
/* PowerPC 74xx software TLB load instructions helpers */
static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_74xx_tlb(env, EPN, 0);
}

void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_74xx_tlb(env, EPN, 1);
}
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
        ret = ctx.raddr;
    }
    env->nb_BATs = nb_BATs;
    return ret;
}

static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}
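/*
 * booke_tlb_to_page_size() maps the TSIZE field to a page size of
 * 1 KB * 4^size: 0 -> 1 KB, 1 -> 4 KB, 2 -> 16 KB, 3 -> 64 KB, 4 -> 256 KB,
 * 5 -> 1 MB, 6 -> 4 MB, 7 -> 16 MB, 8 -> 64 MB, 9 -> 256 MB, 0xA -> 1 GB,
 * and so on up to 0xF -> 1 TB on 64-bit targets.  booke_page_size_to_tlb()
 * below is its inverse and returns -1 for sizes with no encoding.
 */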
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined(TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}
/* Helpers for 4xx TLB management */
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00
target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}
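/*
 * helper_4xx_tlbre_hi() rebuilds the TLBHI word the guest expects from the
 * QEMU-internal entry: the EPN, the valid bit and the 3-bit size field
 * shifted into place, falling back to PPC4XX_TLBHI_SIZE_DEFAULT for sizes
 * that cannot be re-encoded.  It also writes the entry's PID back into
 * SPR_40x_PID as a side effect of the read.
 */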
target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}
void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "is not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported for now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
}
void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
/* PowerPC 440 TLB management */
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
                      target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
            do_flush_tlbs = 1;
        }
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
            do_flush_tlbs = 1;
        }
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs) {
            tlb_flush(env, 1);
        }
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
            tlb_flush(env, 1);
        }
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1) {
            tlb->prot |= PAGE_READ << 4;
        }
        if (value & 0x2) {
            tlb->prot |= PAGE_WRITE << 4;
        }
        if (value & 0x4) {
            tlb->prot |= PAGE_EXEC << 4;
        }
        if (value & 0x8) {
            tlb->prot |= PAGE_READ;
        }
        if (value & 0x10) {
            tlb->prot |= PAGE_WRITE;
        }
        if (value & 0x20) {
            tlb->prot |= PAGE_EXEC;
        }
        break;
    }
}
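/*
 * helper_440_tlbwe() updates one third of a 440 TLB entry at a time, since
 * each entry is exposed as three words: word 0 carries the EPN, the size
 * field and the valid bit, word 1 carries the RPN, and word 2 carries the
 * storage attributes plus the permission bits.  In QEMU's internal
 * ppcemb_tlb_t one set of permissions is kept shifted left by 4 inside
 * tlb->prot, which is why the word-2 case sets both PAGE_* and
 * PAGE_* << 4 flags; helper_440_tlbre() below reverses the same packing.
 */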
target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
                              target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF) {
            size = 1;
        }
        ret |= size << 4;
        if (tlb->attr & 0x1) {
            ret |= 0x100;
        }
        if (tlb->prot & PAGE_VALID) {
            ret |= 0x200;
        }
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4)) {
            ret |= 0x1;
        }
        if (tlb->prot & (PAGE_WRITE << 4)) {
            ret |= 0x2;
        }
        if (tlb->prot & (PAGE_EXEC << 4)) {
            ret |= 0x4;
        }
        if (tlb->prot & PAGE_READ) {
            ret |= 0x8;
        }
        if (tlb->prot & PAGE_WRITE) {
            ret |= 0x10;
        }
        if (tlb->prot & PAGE_EXEC) {
            ret |= 0x20;
        }
        break;
    }
    return ret;
}
target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
/* PowerPC BookE 2.06 TLB management */

static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
{
    uint32_t tlbncfg = 0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env, "we don't support HES yet\n");
    }

    return booke206_get_tlbm(env, tlb, ea, esel);
}
void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    /* changing PIDs means we're in a different address space now */
    tlb_flush(env, 1);
}
void helper_booke206_tlbwe(CPUPPCState *env)
{
    uint32_t tlbncfg, tlbn;
    ppcmas_tlb_t *tlb;
    uint32_t size_tlb, size_ps;
    target_ulong mask;

    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        if (0) {
            return;
        }
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
        !msr_gs) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_INVAL);
    }

    /* check that we support the targeted size */
    size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
    size_ps = booke206_tlbnps(env, tlbn);
    if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
        !(size_ps & (1 << size_tlb))) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_INVAL);
    }

    if (msr_gs) {
        cpu_abort(env, "missing HV implementation\n");
    }
    tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
                  env->spr[SPR_BOOKE_MAS3];
    tlb->mas1 = env->spr[SPR_BOOKE_MAS1];

    if (!(tlbncfg & TLBnCFG_AVAIL)) {
        /* force !AVAIL TLB entries to correct page size */
        tlb->mas1 &= ~MAS1_TSIZE_MASK;
        /* XXX can be configured in MMUCSR0 */
        tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
    }

    /* Make a mask from TLB size to discard invalid bits in EPN field */
    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    /* Add a mask for page attributes */
    mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;

    if (!msr_cm) {
        /* Executing a tlbwe instruction in 32-bit mode will set
         * bits 0:31 of the TLB EPN field to zero.
         */
        mask &= 0xffffffff;
    }

    tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;

    if (!(tlbncfg & TLBnCFG_IPROT)) {
        /* no IPROT supported by TLB */
        tlb->mas1 &= ~MAS1_IPROT;
    }

    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
        tlb_flush_page(env, tlb->mas2 & MAS2_EPN_MASK);
    } else {
        tlb_flush(env, 1);
    }
}
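/*
 * helper_booke206_tlbwe() implements the e500/BookE 2.06 tlbwe flow: the
 * guest first fills the MAS registers (MAS0 selects the TLB array and
 * entry, MAS1 holds the valid bit, TID and TSIZE, MAS2 the EPN and WIMGE
 * attributes, MAS3/MAS7 the RPN and access permissions), and the helper
 * then copies them into the selected ppcmas_tlb_t, fixing up the page size
 * for arrays without TLBnCFG[AVAIL] and dropping IPROT where the array does
 * not support it.  Only the affected page is flushed from QEMU's TLB when
 * the entry maps exactly TARGET_PAGE_SIZE; anything larger forces a full
 * flush.
 */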
static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    int tlbn = booke206_tlbm_to_tlbn(env, tlb);
    int way = booke206_tlbm_to_way(env, tlb);

    env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;

    env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
    env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
    env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
    env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
}
void helper_booke206_tlbre(CPUPPCState *env)
{
    ppcmas_tlb_t *tlb = NULL;

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        env->spr[SPR_BOOKE_MAS1] = 0;
    } else {
        booke206_tlb_to_mas(env, tlb);
    }
}
void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    hwaddr raddr;
    uint32_t spid, sas;

    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }
            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }
            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              uint32_t ea)
{
    int i;
    int ways = booke206_tlb_ways(env, tlbn);
    target_ulong mask;

    for (i = 0; i < ways; i++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
        if (!tlb) {
            continue;
        }
        mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
            !(tlb->mas1 & MAS1_IPROT)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}
void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
    if (address & 0x4) {
        /* flush all entries */
        if (address & 0x8) {
            /* flush all of TLB1 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            /* flush all of TLB0 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    if (address & 0x8) {
        /* flush TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        tlb_flush(env, 1);
    } else {
        /* flush TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        tlb_flush_page(env, address & MAS2_EPN_MASK);
    }
}
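/*
 * helper_booke206_tlbivax() decodes the invalidation request from the
 * effective address itself: bit 0x4 requests a whole-array flush and bit
 * 0x8 picks TLB1 over TLB0; otherwise only entries matching the EPN are
 * invalidated, with IPROT-protected entries left alone by
 * booke206_invalidate_ea_tlb().
 */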
void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
{
    /* XXX missing LPID handling */
    booke206_flush_tlb(env, -1, 1);
}
void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
    int i, j;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    ppcmas_tlb_t *tlb = env->tlb.tlbm;
    int tlb_size;

    /* XXX missing LPID handling */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env, 1);
}
void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode then */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env, 1);
}
void helper_booke206_tlbflush(CPUPPCState *env, uint32_t type)
{
    int flags = 0;

    if (type & 2) {
        flags |= BOOKE206_FLUSH_TLB1;
    }

    if (type & 4) {
        flags |= BOOKE206_FLUSH_TLB0;
    }

    booke206_flush_tlb(env, flags, 1);
}