/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "helper_regs.h"
#include "qemu-common.h"
//#define DEBUG_SOFTWARE_TLB
//#define DUMP_PAGE_TABLES
//#define DEBUG_EXCEPTIONS
//#define FLUSH_ALL_TLBS

#ifdef DEBUG_MMU
#  define LOG_MMU(...) qemu_log(__VA_ARGS__)
#  define LOG_MMU_STATE(env) log_cpu_state((env), 0)
#else
#  define LOG_MMU(...) do { } while (0)
#  define LOG_MMU_STATE(...) do { } while (0)
#endif

#ifdef DEBUG_SOFTWARE_TLB
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SWTLB(...) do { } while (0)
#endif

#ifdef DEBUG_BATS
#  define LOG_BATS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_BATS(...) do { } while (0)
#endif

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

#ifdef DEBUG_EXCEPTIONS
#  define LOG_EXCP(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_EXCP(...) do { } while (0)
#endif
/*****************************************************************************/
/* PowerPC Hypercall emulation */

void (*cpu_ppc_hypercall)(CPUState *);
/*****************************************************************************/
/* PowerPC MMU emulation */

#if defined(CONFIG_USER_ONLY)
int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                              int mmu_idx)
{
    int exception, error_code;

    if (rw == 2) {
        exception = POWERPC_EXCP_ISI;
        error_code = 0x40000000;
    } else {
        exception = POWERPC_EXCP_DSI;
        error_code = 0x40000000;
        if (rw)
            error_code |= 0x02000000;
        env->spr[SPR_DAR] = address;
        env->spr[SPR_DSISR] = error_code;
    }
    env->exception_index = exception;
    env->error_code = error_code;

    return 1;
}

#else
/* Common routines used by software and hardware TLBs emulation */
static inline int pte_is_valid(target_ulong pte0)
{
    return pte0 & 0x80000000 ? 1 : 0;
}

static inline void pte_invalidate(target_ulong *pte0)
{
    *pte0 &= ~0x80000000;
}

#if defined(TARGET_PPC64)
static inline int pte64_is_valid(target_ulong pte0)
{
    return pte0 & 0x0000000000000001ULL ? 1 : 0;
}

static inline void pte64_invalidate(target_ulong *pte0)
{
    *pte0 &= ~0x0000000000000001ULL;
}
#endif

#define PTE_PTEM_MASK 0x7FFFFFBF
#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
#if defined(TARGET_PPC64)
#define PTE64_PTEM_MASK 0xFFFFFFFFFFFFFF80ULL
#define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F)
#endif
static inline int pp_check(int key, int pp, int nx)
{
    int access;

    /* Compute access rights */
    /* When pp is 3/7, the result is undefined. Set it to noaccess */
    access = 0;
    /* ... */
            access |= PAGE_WRITE;
    /* ... */
            access = PAGE_READ | PAGE_WRITE;
    /* ... */

    return access;
}

static inline int check_prot(int prot, int rw, int access_type)
{
    int ret;

    if (access_type == ACCESS_CODE) {
        if (prot & PAGE_EXEC)
            ret = 0;
        else
            ret = -2;
    } else if (rw) {
        if (prot & PAGE_WRITE)
            ret = 0;
        else
            ret = -2;
    } else {
        if (prot & PAGE_READ)
            ret = 0;
        else
            ret = -2;
    }

    return ret;
}
static inline int _pte_check(mmu_ctx_t *ctx, int is_64b, target_ulong pte0,
                             target_ulong pte1, int h, int rw, int type)
{
    target_ulong ptem, mmask;
    int access, ret, pteh, ptev, pp;

    ret = -1;
    /* Check validity and table match */
#if defined(TARGET_PPC64)
    if (is_64b) {
        ptev = pte64_is_valid(pte0);
        pteh = (pte0 >> 1) & 1;
    } else
#endif
    {
        ptev = pte_is_valid(pte0);
        pteh = (pte0 >> 6) & 1;
    }
    if (ptev && h == pteh) {
        /* Check vsid & api */
#if defined(TARGET_PPC64)
        if (is_64b) {
            ptem = pte0 & PTE64_PTEM_MASK;
            mmask = PTE64_CHECK_MASK;
            pp = (pte1 & 0x00000003) | ((pte1 >> 61) & 0x00000004);
            ctx->nx = (pte1 >> 2) & 1; /* No execute bit */
            ctx->nx |= (pte1 >> 3) & 1; /* Guarded bit */
        } else
#endif
        {
            ptem = pte0 & PTE_PTEM_MASK;
            mmask = PTE_CHECK_MASK;
            pp = pte1 & 0x00000003;
        }
        if (ptem == ctx->ptem) {
            if (ctx->raddr != (target_phys_addr_t)-1ULL) {
                /* all matches should have equal RPN, WIMG & PP */
                if ((ctx->raddr & mmask) != (pte1 & mmask)) {
                    qemu_log("Bad RPN/WIMG/PP\n");
                    /* ... */
                }
            }
            /* Compute access rights */
            access = pp_check(ctx->key, pp, ctx->nx);
            /* Keep the matching PTE information */
            /* ... */
            ret = check_prot(ctx->prot, rw, type);
            if (ret == 0) {
                /* Access granted */
                LOG_MMU("PTE access granted !\n");
                /* ... */
            } else {
                /* Access right violation */
                LOG_MMU("PTE access rejected\n");
                /* ... */
            }
        }
    }

    return ret;
}
static inline int pte32_check(mmu_ctx_t *ctx, target_ulong pte0,
                              target_ulong pte1, int h, int rw, int type)
{
    return _pte_check(ctx, 0, pte0, pte1, h, rw, type);
}

#if defined(TARGET_PPC64)
static inline int pte64_check(mmu_ctx_t *ctx, target_ulong pte0,
                              target_ulong pte1, int h, int rw, int type)
{
    return _pte_check(ctx, 1, pte0, pte1, h, rw, type);
}
#endif
static inline int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
                                   int ret, int rw)
{
    int store = 0;

    /* Update page flags */
    if (!(*pte1p & 0x00000100)) {
        /* Update accessed flag */
        *pte1p |= 0x00000100;
        store = 1;
    }
    if (!(*pte1p & 0x00000080)) {
        if (rw == 1 && ret == 0) {
            /* Update changed flag */
            *pte1p |= 0x00000080;
            store = 1;
        } else {
            /* Force page fault for first write access */
            ctx->prot &= ~PAGE_WRITE;
        }
    }

    return store;
}
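
/* Explanatory note (not in the original source): in the low word of a 32-bit
 * hash PTE, 0x00000100 is the Referenced (R) bit and 0x00000080 is the
 * Changed (C) bit.  R is set on any access; C is only set on a permitted
 * store, and the first write to a clean page is forced to fault so the
 * emulated C update above can take place.
 */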
/* Software driven TLB helpers */
static inline int ppc6xx_tlb_getnum(CPUState *env, target_ulong eaddr, int way,
                                    int is_code)
{
    int nr;

    /* Select TLB num in a way from address */
    nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
    /* Select TLB way */
    nr += env->tlb_per_way * way;
    /* 6xx have separate TLBs for instructions and data */
    if (is_code && env->id_tlbs == 1)
        nr += env->nb_tlb;

    return nr;
}
static inline void ppc6xx_tlb_invalidate_all(CPUState *env)
{
    ppc6xx_tlb_t *tlb;
    int nr, max;

    //LOG_SWTLB("Invalidate all TLBs\n");
    /* Invalidate all defined software TLB */
    max = env->nb_tlb;
    if (env->id_tlbs == 1)
        max *= 2;
    for (nr = 0; nr < max; nr++) {
        tlb = &env->tlb.tlb6[nr];
        pte_invalidate(&tlb->pte0);
    }
    tlb_flush(env, 1);
}
static inline void __ppc6xx_tlb_invalidate_virt(CPUState *env,
                                                target_ulong eaddr,
                                                int is_code, int match_epn)
{
#if !defined(FLUSH_ALL_TLBS)
    ppc6xx_tlb_t *tlb;
    int way, nr;

    /* Invalidate ITLB + DTLB, all ways */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
        tlb = &env->tlb.tlb6[nr];
        if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
            LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
                      env->nb_tlb, eaddr);
            pte_invalidate(&tlb->pte0);
            tlb_flush_page(env, tlb->EPN);
        }
    }
#else
    /* XXX: the PowerPC specification says this is valid as well */
    ppc6xx_tlb_invalidate_all(env);
#endif
}

static inline void ppc6xx_tlb_invalidate_virt(CPUState *env,
                                              target_ulong eaddr, int is_code)
{
    __ppc6xx_tlb_invalidate_virt(env, eaddr, is_code, 0);
}
void ppc6xx_tlb_store (CPUState *env, target_ulong EPN, int way, int is_code,
                       target_ulong pte0, target_ulong pte1)
{
    ppc6xx_tlb_t *tlb;
    int nr;

    nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
    tlb = &env->tlb.tlb6[nr];
    LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1);
    /* Invalidate any pending reference in Qemu for this virtual address */
    __ppc6xx_tlb_invalidate_virt(env, EPN, is_code, 1);
    tlb->pte0 = pte0;
    tlb->pte1 = pte1;
    tlb->EPN = EPN;
    /* Store last way for LRU mechanism */
    env->last_way = way;
}
static inline int ppc6xx_tlb_check(CPUState *env, mmu_ctx_t *ctx,
                                   target_ulong eaddr, int rw, int access_type)
{
    ppc6xx_tlb_t *tlb;
    int nr, best, way;
    int ret;

    best = -1;
    ret = -1; /* No TLB found */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way,
                               access_type == ACCESS_CODE ? 1 : 0);
        tlb = &env->tlb.tlb6[nr];
        /* This test "emulates" the PTE index match for hardware TLBs */
        if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
            LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
                      "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
                      pte_is_valid(tlb->pte0) ? "valid" : "inval",
                      tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
            continue;
        }
        LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
                  TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
                  pte_is_valid(tlb->pte0) ? "valid" : "inval",
                  tlb->EPN, eaddr, tlb->pte1,
                  rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
        switch (pte32_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) {
        /* ... */
            /* TLB inconsistency */
        /* ... */
            /* Access violation */
        /* ... */
        }
        /* XXX: we should go on looping to check all TLBs consistency
         *      but we can speed-up the whole thing as the
         *      result would be undefined if TLBs are not consistent.
         */
    }
    if (best != -1) {
        LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
                  ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
        /* Update page flags */
        pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, rw);
    }

    return ret;
}
/* Perform BAT hit & translation */
static inline void bat_size_prot(CPUState *env, target_ulong *blp, int *validp,
                                 int *protp, target_ulong *BATu,
                                 target_ulong *BATl)
{
    target_ulong bl;
    int pp, valid, prot;

    bl = (*BATu & 0x00001FFC) << 15;
    valid = 0;
    prot = 0;
    if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
        ((msr_pr != 0) && (*BATu & 0x00000001))) {
        valid = 1;
        pp = *BATl & 0x00000003;
        if (pp != 0) {
            prot = PAGE_READ | PAGE_EXEC;
            /* ... */
        }
    }
    *blp = bl;
    *validp = valid;
    *protp = prot;
}

static inline void bat_601_size_prot(CPUState *env, target_ulong *blp,
                                     int *validp, int *protp,
                                     target_ulong *BATu, target_ulong *BATl)
{
    target_ulong bl;
    int key, pp, valid, prot;

    bl = (*BATl & 0x0000003F) << 17;
    LOG_BATS("b %02x ==> bl " TARGET_FMT_lx " msk " TARGET_FMT_lx "\n",
             (uint8_t)(*BATl & 0x0000003F), bl, ~bl);
    prot = 0;
    valid = (*BATl >> 6) & 1;
    if (valid) {
        pp = *BATu & 0x00000003;
        if (msr_pr == 0)
            key = (*BATu >> 3) & 1;
        else
            key = (*BATu >> 2) & 1;
        prot = pp_check(key, pp, 0);
    }
    *blp = bl;
    *validp = valid;
    *protp = prot;
}
static inline int get_bat(CPUState *env, mmu_ctx_t *ctx, target_ulong virtual,
                          int rw, int type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i, valid, prot;
    int ret = -1;

    LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
             type == ACCESS_CODE ? 'I' : 'D', virtual);
    if (type == ACCESS_CODE) {
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
    } else {
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
    }
    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
            bat_601_size_prot(env, &bl, &valid, &prot, BATu, BATl);
        } else {
            bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
        }
        LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                 " BATl " TARGET_FMT_lx "\n", __func__,
                 type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl);
        if ((virtual & 0xF0000000) == BEPIu &&
            ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
            /* BAT matches */
            if (valid != 0) {
                /* Get physical address */
                ctx->raddr = (*BATl & 0xF0000000) |
                    ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
                    (virtual & 0x0001F000);
                /* Compute access rights */
                ctx->prot = prot;
                ret = check_prot(ctx->prot, rw, type);
                if (ret == 0) {
                    LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
                             i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
                             ctx->prot & PAGE_WRITE ? 'W' : '-');
                }
                break;
            }
        }
    }
    if (ret < 0) {
#if defined(DEBUG_BATS)
        if (qemu_log_enabled()) {
            LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual);
            for (i = 0; i < 4; i++) {
                BATu = &BATut[i];
                BATl = &BATlt[i];
                BEPIu = *BATu & 0xF0000000;
                BEPIl = *BATu & 0x0FFE0000;
                bl = (*BATu & 0x00001FFC) << 15;
                LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                         " BATl " TARGET_FMT_lx " \n\t" TARGET_FMT_lx " "
                         TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                         __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual,
                         *BATu, *BATl, BEPIu, BEPIl, bl);
            }
        }
#endif
    }

    /* No hit */
    return ret;
}
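
/* Explanatory note (not in the original source): a BAT maps a power-of-two
 * block of at least 128KB.  The decoded BL field ('bl') masks off the low
 * BEPI bits, so the comparison above only checks address bits above the
 * block size; the physical address is then BRPN plus the offset inside the
 * block.
 */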
static inline target_phys_addr_t get_pteg_offset(CPUState *env,
                                                 target_phys_addr_t hash,
                                                 int pte_size)
{
    return (hash * pte_size * 8) & env->htab_mask;
}
/* PTE table lookup */
static inline int _find_pte(CPUState *env, mmu_ctx_t *ctx, int is_64b, int h,
                            int rw, int type, int target_page_bits)
{
    target_phys_addr_t pteg_off;
    target_ulong pte0, pte1;
    int i, good = -1;
    int ret, r;

    ret = -1; /* No entry found */
    pteg_off = get_pteg_offset(env, ctx->hash[h],
                               is_64b ? HASH_PTE_SIZE_64 : HASH_PTE_SIZE_32);
    for (i = 0; i < 8; i++) {
#if defined(TARGET_PPC64)
        if (is_64b) {
            if (env->external_htab) {
                pte0 = ldq_p(env->external_htab + pteg_off + (i * 16));
                pte1 = ldq_p(env->external_htab + pteg_off + (i * 16) + 8);
            } else {
                pte0 = ldq_phys(env->htab_base + pteg_off + (i * 16));
                pte1 = ldq_phys(env->htab_base + pteg_off + (i * 16) + 8);
            }

            /* We have a TLB that saves 4K pages, so let's
             * split a huge page to 4k chunks */
            if (target_page_bits != TARGET_PAGE_BITS)
                pte1 |= (ctx->eaddr & ((1 << target_page_bits) - 1))
                        & TARGET_PAGE_MASK;

            r = pte64_check(ctx, pte0, pte1, h, rw, type);
            LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " "
                    TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
                    pteg_off + (i * 16), pte0, pte1, (int)(pte0 & 1), h,
                    (int)((pte0 >> 1) & 1), ctx->ptem);
        } else
#endif
        {
            if (env->external_htab) {
                pte0 = ldl_p(env->external_htab + pteg_off + (i * 8));
                pte1 = ldl_p(env->external_htab + pteg_off + (i * 8) + 4);
            } else {
                pte0 = ldl_phys(env->htab_base + pteg_off + (i * 8));
                pte1 = ldl_phys(env->htab_base + pteg_off + (i * 8) + 4);
            }
            r = pte32_check(ctx, pte0, pte1, h, rw, type);
            LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " "
                    TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
                    pteg_off + (i * 8), pte0, pte1, (int)(pte0 >> 31), h,
                    (int)((pte0 >> 6) & 1), ctx->ptem);
        }
        switch (r) {
        /* ... */
            /* PTE inconsistency */
        /* ... */
            /* Access violation */
        /* ... */
        }
        /* XXX: we should go on looping to check all PTEs consistency
         *      but we can speed up the whole thing as the
         *      result would be undefined if PTEs are not consistent.
         */
    }
    if (good != -1) {
        LOG_MMU("found PTE at addr " TARGET_FMT_lx " prot=%01x ret=%d\n",
                ctx->raddr, ctx->prot, ret);
        /* Update page flags */
        /* ... */
        if (pte_update_flags(ctx, &pte1, ret, rw) == 1) {
#if defined(TARGET_PPC64)
            if (is_64b) {
                if (env->external_htab) {
                    stq_p(env->external_htab + pteg_off + (good * 16) + 8,
                          pte1);
                } else {
                    stq_phys_notdirty(env->htab_base + pteg_off +
                                      (good * 16) + 8, pte1);
                }
            } else
#endif
            {
                if (env->external_htab) {
                    stl_p(env->external_htab + pteg_off + (good * 8) + 4,
                          pte1);
                } else {
                    stl_phys_notdirty(env->htab_base + pteg_off +
                                      (good * 8) + 4, pte1);
                }
            }
        }
    }

    return ret;
}
static inline int find_pte(CPUState *env, mmu_ctx_t *ctx, int h, int rw,
                           int type, int target_page_bits)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64)
        return _find_pte(env, ctx, 1, h, rw, type, target_page_bits);
#endif

    return _find_pte(env, ctx, 0, h, rw, type, target_page_bits);
}
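
/* Explanatory note (not in the original source): each PTE group (PTEG) holds
 * eight PTEs, hence the eight-iteration scan in _find_pte().  The caller
 * invokes find_pte() with h=0 for the primary hash and h=1 for the secondary
 * hash (the one's complement of the primary), and the H bit stored in the PTE
 * must match the hash function that was used.
 */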
#if defined(TARGET_PPC64)
static inline ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
{
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}
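
/* Explanatory note (not in the original source): an SLB entry pairs an ESID
 * with a VSID.  The effective address is masked down to its 256MB or 1TB
 * segment, the valid bit is folded in, and the result is compared against the
 * stored ESID together with the segment-size (B) field of the VSID word.
 */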
void ppc_slb_invalidate_all (CPUPPCState *env)
{
    int n, do_invalidate;

    do_invalidate = 0;
    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /* XXX: given the fact that segment size is 256 MB or 1TB,
             *      and we still don't have a tlb_flush_mask(env, n, mask)
             *      in Qemu, we just invalidate all TLBs
             */
            do_invalidate = 1;
        }
    }
    if (do_invalidate)
        tlb_flush(env, 1);
}

void ppc_slb_invalidate_one (CPUPPCState *env, uint64_t T0)
{
    ppc_slb_t *slb;

    slb = slb_lookup(env, T0);
    /* ... */

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /* XXX: given the fact that segment size is 256 MB or 1TB,
         *      and we still don't have a tlb_flush_mask(env, n, mask)
         *      in Qemu, we just invalidate all TLBs
         */
        tlb_flush(env, 1);
    }
}
int ppc_store_slb (CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (rb & (0x1000 - env->slb_nr)) {
        return -1; /* Reserved bits set or slot too high */
    }
    if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    /* Mask out the slot number as we store the entry */
    slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
    slb->vsid = rs;

    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, slot, rb, rs,
            slb->esid, slb->vsid);

    return 0;
}

int ppc_load_slb_esid (CPUPPCState *env, target_ulong rb, target_ulong *rt)
{
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

int ppc_load_slb_vsid (CPUPPCState *env, target_ulong rb, target_ulong *rt)
{
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}
#endif /* defined(TARGET_PPC64) */
/* Perform segment based translation */
static inline int get_segment(CPUState *env, mmu_ctx_t *ctx,
                              target_ulong eaddr, int rw, int type)
{
    target_phys_addr_t hash;
    target_ulong vsid;
    int ds, pr, target_page_bits;
    int ret, ret2;

    pr = msr_pr;
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        ppc_slb_t *slb;
        target_ulong pageaddr;
        int segment_bits;

        LOG_MMU("Check SLBs\n");
        slb = slb_lookup(env, eaddr);
        if (!slb) {
            return -5;
        }

        if (slb->vsid & SLB_VSID_B) {
            vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
            segment_bits = 40;
        } else {
            vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
            segment_bits = 28;
        }

        target_page_bits = (slb->vsid & SLB_VSID_L)
            ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
        ctx->key = !!(pr ? (slb->vsid & SLB_VSID_KP)
                         : (slb->vsid & SLB_VSID_KS));
        ds = 0;
        ctx->nx = !!(slb->vsid & SLB_VSID_N);

        pageaddr = eaddr & ((1ULL << segment_bits)
                            - (1ULL << target_page_bits));
        if (slb->vsid & SLB_VSID_B) {
            hash = vsid ^ (vsid << 25) ^ (pageaddr >> target_page_bits);
        } else {
            hash = vsid ^ (pageaddr >> target_page_bits);
        }
        /* Only 5 bits of the page index are used in the AVPN */
        ctx->ptem = (slb->vsid & SLB_VSID_PTEM) |
            ((pageaddr >> 16) & ((1ULL << segment_bits) - 0x80));
    } else
#endif /* defined(TARGET_PPC64) */
    {
        target_ulong sr, pgidx;

        sr = env->sr[eaddr >> 28];
        ctx->key = (((sr & 0x20000000) && (pr != 0)) ||
                    ((sr & 0x40000000) && (pr == 0))) ? 1 : 0;
        ds = sr & 0x80000000 ? 1 : 0;
        ctx->nx = sr & 0x10000000 ? 1 : 0;
        vsid = sr & 0x00FFFFFF;
        target_page_bits = TARGET_PAGE_BITS;
        LOG_MMU("Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx " nip="
                TARGET_FMT_lx " lr=" TARGET_FMT_lx
                " ir=%d dr=%d pr=%d %d t=%d\n",
                eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
                (int)msr_dr, pr != 0 ? 1 : 0, rw, type);
        pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
        hash = vsid ^ pgidx;
        ctx->ptem = (vsid << 7) | (pgidx >> 10);
    }
    LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
            ctx->key, ds, ctx->nx, vsid);
    ret = -1;
    if (!ds) {
        /* Check if instruction fetch is allowed, if needed */
        if (type != ACCESS_CODE || ctx->nx == 0) {
            /* Page address translation */
            LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
                    " hash " TARGET_FMT_plx "\n",
                    env->htab_base, env->htab_mask, hash);
            ctx->hash[0] = hash;
            ctx->hash[1] = ~hash;

            /* Initialize real address with an invalid value */
            ctx->raddr = (target_phys_addr_t)-1ULL;
            if (unlikely(env->mmu_model == POWERPC_MMU_SOFT_6xx ||
                         env->mmu_model == POWERPC_MMU_SOFT_74xx)) {
                /* Software TLB search */
                ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type);
            } else {
                LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                        " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                        " hash=" TARGET_FMT_plx "\n",
                        env->htab_base, env->htab_mask, vsid, ctx->ptem,
                        ctx->hash[0]);
                /* Primary table lookup */
                ret = find_pte(env, ctx, 0, rw, type, target_page_bits);
                if (ret < 0) {
                    /* Secondary table lookup */
                    if (eaddr != 0xEFFFFFFF)
                        LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                                " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                                " hash=" TARGET_FMT_plx "\n", env->htab_base,
                                env->htab_mask, vsid, ctx->ptem,
                                ctx->hash[1]);
                    ret2 = find_pte(env, ctx, 1, rw, type,
                                    target_page_bits);
                    if (ret2 != -1)
                        ret = ret2;
                }
            }
#if defined (DUMP_PAGE_TABLES)
            if (qemu_log_enabled()) {
                target_phys_addr_t curaddr;
                uint32_t a0, a1, a2, a3;

                qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
                         "\n", sdr, mask + 0x80);
                for (curaddr = sdr; curaddr < (sdr + mask + 0x80);
                     curaddr += 16) {
                    a0 = ldl_phys(curaddr);
                    a1 = ldl_phys(curaddr + 4);
                    a2 = ldl_phys(curaddr + 8);
                    a3 = ldl_phys(curaddr + 12);
                    if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
                        qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
                                 curaddr, a0, a1, a2, a3);
                    }
                }
            }
#endif
        } else {
            LOG_MMU("No access allowed\n");
            ret = -3;
        }
    } else {
        target_ulong sr;

        LOG_MMU("direct store...\n");
        /* Direct-store segment : absolutely *BUGGY* for now */

        /* Direct-store implies a 32-bit MMU.
         * Check the Segment Register's bus unit ID (BUID).
         */
        sr = env->sr[eaddr >> 28];
        if ((sr & 0x1FF00000) >> 20 == 0x07f) {
            /* Memory-forced I/O controller interface access */
            /* If T=1 and BUID=x'07F', the 601 performs a memory access
             * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
             */
            ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
            ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return 0;
        }

        switch (type) {
        case ACCESS_INT:
            /* Integer load/store : only access allowed */
            break;
        case ACCESS_CODE:
            /* No code fetch is allowed in direct-store areas */
            return -4;
        case ACCESS_FLOAT:
            /* Floating point load/store */
            return -4;
        case ACCESS_RES:
            /* lwarx, ldarx or stwcx. */
            return -4;
        case ACCESS_CACHE:
            /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
            /* Should make the instruction a no-op.
             * As it already does nothing, it's quite easy :-)
             */
            ctx->raddr = eaddr;
            return 0;
        case ACCESS_EXT:
            /* eciwx or ecowx */
            return -4;
        default:
            qemu_log("ERROR: instruction should not need "
                     "address translation\n");
            return -4;
        }
        if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) {
            ctx->raddr = eaddr;
            ret = 2;
        } else {
            ret = -2;
        }
    }

    return ret;
}
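
/* Explanatory note (not in the original source): the 32-bit segment register
 * fields decoded in get_segment() are T (0x80000000, direct-store segment),
 * Ks/Kp (0x40000000/0x20000000, the storage key selected by problem state),
 * N (0x10000000, no-execute) and the VSID in the low 24 bits.
 */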
/* Generic TLB check function for embedded PowerPC implementations */
int ppcemb_tlb_check(CPUState *env, ppcemb_tlb_t *tlb,
                     target_phys_addr_t *raddrp,
                     target_ulong address, uint32_t pid, int ext,
                     int i)
{
    target_ulong mask;

    /* Check valid flag */
    if (!(tlb->prot & PAGE_VALID)) {
        return -1;
    }
    mask = ~(tlb->size - 1);
    LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx
              " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN,
              mask, (uint32_t)tlb->PID, tlb->prot);
    /* Check PID */
    if (tlb->PID != 0 && tlb->PID != pid)
        return -1;
    /* Check effective address */
    if ((address & mask) != tlb->EPN)
        return -1;
    *raddrp = (tlb->RPN & mask) | (address & ~mask);
#if (TARGET_PHYS_ADDR_BITS >= 36)
    if (ext) {
        /* Extend the physical address to 36 bits */
        *raddrp |= (target_phys_addr_t)(tlb->RPN & 0xF) << 32;
    }
#endif

    return 0;
}
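
/* Explanatory note (not in the original source): embedded (4xx/BookE) TLB
 * entries map variable-size pages, so the entry size is turned into an
 * address mask.  A PID of 0 stored in the entry acts as a wildcard that
 * matches every process ID.
 */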
/* Generic TLB search function for PowerPC embedded implementations */
int ppcemb_tlb_search (CPUPPCState *env, target_ulong address, uint32_t pid)
{
    ppcemb_tlb_t *tlb;
    target_phys_addr_t raddr;
    int i, ret;

    /* Default return value is no match */
    ret = -1;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
            ret = i;
            break;
        }
    }

    return ret;
}
/* Helpers specific to PowerPC 40x implementations */
static inline void ppc4xx_tlb_invalidate_all(CPUState *env)
{
    ppcemb_tlb_t *tlb;
    int i;

    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        tlb->prot &= ~PAGE_VALID;
    }
    tlb_flush(env, 1);
}

static inline void ppc4xx_tlb_invalidate_virt(CPUState *env,
                                              target_ulong eaddr, uint32_t pid)
{
#if !defined(FLUSH_ALL_TLBS)
    ppcemb_tlb_t *tlb;
    target_phys_addr_t raddr;
    target_ulong page, end;
    int i;

    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, eaddr, pid, 0, i) == 0) {
            end = tlb->EPN + tlb->size;
            for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
                tlb_flush_page(env, page);
            tlb->prot &= ~PAGE_VALID;
            break;
        }
    }
#else
    ppc4xx_tlb_invalidate_all(env);
#endif
}
static int mmu40x_get_physical_address (CPUState *env, mmu_ctx_t *ctx,
                                        target_ulong address, int rw,
                                        int access_type)
{
    ppcemb_tlb_t *tlb;
    target_phys_addr_t raddr;
    int i, ret, zsel, zpr, pr;

    ret = -1;
    raddr = (target_phys_addr_t)-1ULL;
    pr = msr_pr;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, address,
                             env->spr[SPR_40x_PID], 0, i) < 0)
            continue;
        zsel = (tlb->attr >> 4) & 0xF;
        zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
        LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
                  __func__, i, zsel, zpr, rw, tlb->attr);
        /* Check execute enable bit */
        switch (zpr) {
        /* ... */
            /* All accesses granted */
            ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        /* ... */
            /* Raise Zone protection fault. */
            env->spr[SPR_40x_ESR] = 1 << 22;
        /* ... */
            /* Check from TLB entry */
            ctx->prot = tlb->prot;
            ret = check_prot(ctx->prot, rw, access_type);
            env->spr[SPR_40x_ESR] = 0;
        /* ... */
        }
        if (ret >= 0) {
            ctx->raddr = raddr;
            LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
                      " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
                      ret);
            return 0;
        }
    }
    LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
              " %d %d\n", __func__, address, raddr, ctx->prot, ret);

    return ret;
}
void store_40x_sler (CPUPPCState *env, uint32_t val)
{
    /* XXX: TO BE FIXED */
    if (val != 0x00000000) {
        cpu_abort(env, "Little-endian regions are not supported by now\n");
    }
    env->spr[SPR_405_SLER] = val;
}
static inline int mmubooke_check_tlb (CPUState *env, ppcemb_tlb_t *tlb,
                                      target_phys_addr_t *raddr, int *prot,
                                      target_ulong address, int rw,
                                      int access_type, int i)
{
    int ret, _prot;

    if (ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID],
                         !env->nb_pids, i) >= 0) {
        goto found_tlb;
    }

    if (env->spr[SPR_BOOKE_PID1] &&
        ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID1], 0, i) >= 0) {
        goto found_tlb;
    }

    if (env->spr[SPR_BOOKE_PID2] &&
        ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID2], 0, i) >= 0) {
        goto found_tlb;
    }

    LOG_SWTLB("%s: TLB entry not found\n", __func__);
    return -1;

found_tlb:

    if (msr_pr != 0)
        _prot = tlb->prot & 0xF;
    else
        _prot = (tlb->prot >> 4) & 0xF;

    /* Check the address space */
    if (access_type == ACCESS_CODE) {
        if (msr_ir != (tlb->attr & 1)) {
            LOG_SWTLB("%s: AS doesn't match\n", __func__);
            return -1;
        }

        *prot = _prot;
        if (_prot & PAGE_EXEC) {
            LOG_SWTLB("%s: good TLB!\n", __func__);
            return 0;
        }

        LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, _prot);
        ret = -3;
    } else {
        if (msr_dr != (tlb->attr & 1)) {
            LOG_SWTLB("%s: AS doesn't match\n", __func__);
            return -1;
        }

        *prot = _prot;
        if ((!rw && _prot & PAGE_READ) || (rw && (_prot & PAGE_WRITE))) {
            LOG_SWTLB("%s: found TLB!\n", __func__);
            return 0;
        }

        LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, _prot);
        ret = -2;
    }

    return ret;
}
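
/* Explanatory note (not in the original source): BookE keeps the address
 * space (AS) bit of each TLB entry in attr bit 0.  It must equal MSR[IR] for
 * instruction fetches and MSR[DR] for data accesses, since on BookE those MSR
 * bits select an address space rather than turning translation off.
 */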
static int mmubooke_get_physical_address (CPUState *env, mmu_ctx_t *ctx,
                                          target_ulong address, int rw,
                                          int access_type)
{
    ppcemb_tlb_t *tlb;
    target_phys_addr_t raddr;
    int i, ret;

    ret = -1;
    raddr = (target_phys_addr_t)-1ULL;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw,
                                 access_type, i);
        if (ret != -1)
            break;
    }

    if (ret >= 0) {
        ctx->raddr = raddr;
        LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
                  ret);
    } else {
        LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, raddr, ctx->prot, ret);
    }

    return ret;
}
void booke206_flush_tlb(CPUState *env, int flags, const int check_iprot)
{
    int tlb_size;
    int i, j;
    ppcmas_tlb_t *tlb = env->tlb.tlbm;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        if (flags & (1 << i)) {
            tlb_size = booke206_tlb_size(env, i);
            for (j = 0; j < tlb_size; j++) {
                if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
                    tlb[j].mas1 &= ~MAS1_VALID;
                }
            }
        }
        tlb += booke206_tlb_size(env, i);
    }

    tlb_flush(env, 1);
}
booke206_tlb_to_page_size(CPUState
*env
, ppcmas_tlb_t
*tlb
)
1295 int tlbn
= booke206_tlbm_to_tlbn(env
, tlb
);
1296 target_phys_addr_t tlbm_size
;
1298 tlbncfg
= env
->spr
[SPR_BOOKE_TLB0CFG
+ tlbn
];
1300 if (tlbncfg
& TLBnCFG_AVAIL
) {
1301 tlbm_size
= (tlb
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
1303 tlbm_size
= (tlbncfg
& TLBnCFG_MINSIZE
) >> TLBnCFG_MINSIZE_SHIFT
;
1306 return (1 << (tlbm_size
<< 1)) << 10;
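
/* Explanatory note (not in the original source): MAS1[TSIZE] encodes a
 * power-of-four page size in KB, so the byte size is 4^TSIZE * 1024, which is
 * what (1 << (tsize << 1)) << 10 computes above.
 */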
/* TLB check function for MAS based SoftTLBs */
int ppcmas_tlb_check(CPUState *env, ppcmas_tlb_t *tlb,
                     target_phys_addr_t *raddrp,
                     target_ulong address, uint32_t pid)
{
    target_ulong mask;
    uint32_t tlb_pid;

    /* Check valid flag */
    if (!(tlb->mas1 & MAS1_VALID)) {
        return -1;
    }

    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%"
              PRIx64 " mask=0x" TARGET_FMT_lx " MAS7_3=0x%" PRIx64 " MAS8=%x\n",
              __func__, address, pid, tlb->mas1, tlb->mas2, mask, tlb->mas7_3,
              tlb->mas8);

    /* Check PID */
    tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
    if (tlb_pid != 0 && tlb_pid != pid) {
        return -1;
    }

    /* Check effective address */
    if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
        return -1;
    }
    *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);

    return 0;
}
static int mmubooke206_check_tlb(CPUState *env, ppcmas_tlb_t *tlb,
                                 target_phys_addr_t *raddr, int *prot,
                                 target_ulong address, int rw,
                                 int access_type)
{
    int ret, _prot = 0;

    if (ppcmas_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID]) >= 0) {
        goto found_tlb;
    }

    if (env->spr[SPR_BOOKE_PID1] &&
        ppcmas_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID1]) >= 0) {
        goto found_tlb;
    }

    if (env->spr[SPR_BOOKE_PID2] &&
        ppcmas_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID2]) >= 0) {
        goto found_tlb;
    }

    LOG_SWTLB("%s: TLB entry not found\n", __func__);
    return -1;

found_tlb:

    if (msr_pr != 0) {
        if (tlb->mas7_3 & MAS3_UR) {
            _prot |= PAGE_READ;
        }
        if (tlb->mas7_3 & MAS3_UW) {
            _prot |= PAGE_WRITE;
        }
        if (tlb->mas7_3 & MAS3_UX) {
            _prot |= PAGE_EXEC;
        }
    } else {
        if (tlb->mas7_3 & MAS3_SR) {
            _prot |= PAGE_READ;
        }
        if (tlb->mas7_3 & MAS3_SW) {
            _prot |= PAGE_WRITE;
        }
        if (tlb->mas7_3 & MAS3_SX) {
            _prot |= PAGE_EXEC;
        }
    }

    /* Check the address space and permissions */
    if (access_type == ACCESS_CODE) {
        if (msr_ir != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
            LOG_SWTLB("%s: AS doesn't match\n", __func__);
            return -1;
        }

        *prot = _prot;
        if (_prot & PAGE_EXEC) {
            LOG_SWTLB("%s: good TLB!\n", __func__);
            return 0;
        }

        LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, _prot);
        ret = -3;
    } else {
        if (msr_dr != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
            LOG_SWTLB("%s: AS doesn't match\n", __func__);
            return -1;
        }

        *prot = _prot;
        if ((!rw && _prot & PAGE_READ) || (rw && (_prot & PAGE_WRITE))) {
            LOG_SWTLB("%s: found TLB!\n", __func__);
            return 0;
        }

        LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, _prot);
        ret = -2;
    }

    return ret;
}
static int mmubooke206_get_physical_address(CPUState *env, mmu_ctx_t *ctx,
                                            target_ulong address, int rw,
                                            int access_type)
{
    ppcmas_tlb_t *tlb;
    target_phys_addr_t raddr;
    int i, j, ret;

    ret = -1;
    raddr = (target_phys_addr_t)-1ULL;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
                                        rw, access_type);
            if (ret != -1)
                goto found_tlb;
        }
    }

found_tlb:

    if (ret >= 0) {
        ctx->raddr = raddr;
        LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
                  ret);
    } else {
        LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
                  " %d %d\n", __func__, address, raddr, ctx->prot, ret);
    }

    return ret;
}
static inline int check_physical(CPUState *env, mmu_ctx_t *ctx,
                                 target_ulong eaddr, int rw)
{
    int in_plb, ret;

    ctx->raddr = eaddr;
    ctx->prot = PAGE_READ | PAGE_EXEC;
    ret = 0;
    switch (env->mmu_model) {
    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_REAL:
    case POWERPC_MMU_BOOKE:
        ctx->prot |= PAGE_WRITE;
        break;
#if defined(TARGET_PPC64)
    case POWERPC_MMU_620:
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_06:
        /* Real addresses are 60 bits long */
        ctx->raddr &= 0x0FFFFFFFFFFFFFFFULL;
        ctx->prot |= PAGE_WRITE;
        break;
#endif
    case POWERPC_MMU_SOFT_4xx_Z:
        if (unlikely(msr_pe != 0)) {
            /* The 403 family adds some particular protections,
             * using PBL/PBU registers for accesses with no translation.
             */
            in_plb =
                /* Check PLB validity */
                (env->pb[0] < env->pb[1] &&
                 /* and address in plb area */
                 eaddr >= env->pb[0] && eaddr < env->pb[1]) ||
                (env->pb[2] < env->pb[3] &&
                 eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
            if (in_plb ^ msr_px) {
                /* Access in protected area */
                if (rw == 1) {
                    /* Access is not allowed */
                    ret = -2;
                }
            } else {
                /* Read-write access is allowed */
                ctx->prot |= PAGE_WRITE;
            }
        }
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env, "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE206:
        cpu_abort(env, "BookE 2.06 MMU doesn't have physical real mode\n");
        break;
    default:
        cpu_abort(env, "Unknown or invalid MMU model\n");
        return -1;
    }

    return ret;
}
int get_physical_address (CPUState *env, mmu_ctx_t *ctx, target_ulong eaddr,
                          int rw, int access_type)
{
    int ret;

    qemu_log("%s\n", __func__);

    if ((access_type == ACCESS_CODE && msr_ir == 0) ||
        (access_type != ACCESS_CODE && msr_dr == 0)) {
        if (env->mmu_model == POWERPC_MMU_BOOKE) {
            /* The BookE MMU always performs address translation. The
               IS and DS bits only affect the address space. */
            ret = mmubooke_get_physical_address(env, ctx, eaddr,
                                                rw, access_type);
        } else if (env->mmu_model == POWERPC_MMU_BOOKE206) {
            ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw,
                                                   access_type);
        } else {
            /* No address translation. */
            ret = check_physical(env, ctx, eaddr, rw);
        }
    } else {
        ret = -1;
        switch (env->mmu_model) {
        case POWERPC_MMU_32B:
        case POWERPC_MMU_601:
        case POWERPC_MMU_SOFT_6xx:
        case POWERPC_MMU_SOFT_74xx:
            /* Try to find a BAT */
            if (env->nb_BATs != 0)
                ret = get_bat(env, ctx, eaddr, rw, access_type);
#if defined(TARGET_PPC64)
        case POWERPC_MMU_620:
        case POWERPC_MMU_64B:
        case POWERPC_MMU_2_06:
#endif
            if (ret < 0) {
                /* We didn't match any BAT entry or don't have BATs */
                ret = get_segment(env, ctx, eaddr, rw, access_type);
            }
            break;
        case POWERPC_MMU_SOFT_4xx:
        case POWERPC_MMU_SOFT_4xx_Z:
            ret = mmu40x_get_physical_address(env, ctx, eaddr,
                                              rw, access_type);
            break;
        case POWERPC_MMU_BOOKE:
            ret = mmubooke_get_physical_address(env, ctx, eaddr,
                                                rw, access_type);
            break;
        case POWERPC_MMU_BOOKE206:
            ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw,
                                                   access_type);
            break;
        case POWERPC_MMU_MPC8xx:
            /* XXX: TODO */
            cpu_abort(env, "MPC8xx MMU model is not implemented\n");
            break;
        case POWERPC_MMU_REAL:
            cpu_abort(env, "PowerPC in real mode does not do any translation\n");
            return -1;
        default:
            cpu_abort(env, "Unknown or invalid MMU model\n");
            return -1;
        }
    }

    qemu_log("%s address " TARGET_FMT_lx " => %d " TARGET_FMT_plx "\n",
             __func__, eaddr, ret, ctx->raddr);

    return ret;
}
target_phys_addr_t cpu_get_phys_page_debug (CPUState *env, target_ulong addr)
{
    mmu_ctx_t ctx;

    if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0))
        return -1;

    return ctx.raddr & TARGET_PAGE_MASK;
}
static void booke206_update_mas_tlb_miss(CPUState *env, target_ulong address,
                                         int rw)
{
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS6] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (((rw == 2) && msr_ir) || ((rw != 2) && msr_dr)) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
        env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;
    }

    env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
    env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;

    switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
    case MAS4_TIDSELD_PID0:
        env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID] << MAS1_TID_SHIFT;
        break;
    case MAS4_TIDSELD_PID1:
        env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID1] << MAS1_TID_SHIFT;
        break;
    case MAS4_TIDSELD_PID2:
        env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID2] << MAS1_TID_SHIFT;
        break;
    }

    env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
1660 int cpu_ppc_handle_mmu_fault (CPUState
*env
, target_ulong address
, int rw
,
1670 access_type
= ACCESS_CODE
;
1673 access_type
= env
->access_type
;
1675 ret
= get_physical_address(env
, &ctx
, address
, rw
, access_type
);
1677 tlb_set_page(env
, address
& TARGET_PAGE_MASK
,
1678 ctx
.raddr
& TARGET_PAGE_MASK
, ctx
.prot
,
1679 mmu_idx
, TARGET_PAGE_SIZE
);
1681 } else if (ret
< 0) {
1683 if (access_type
== ACCESS_CODE
) {
1686 /* No matches in page tables or TLB */
1687 switch (env
->mmu_model
) {
1688 case POWERPC_MMU_SOFT_6xx
:
1689 env
->exception_index
= POWERPC_EXCP_IFTLB
;
1690 env
->error_code
= 1 << 18;
1691 env
->spr
[SPR_IMISS
] = address
;
1692 env
->spr
[SPR_ICMP
] = 0x80000000 | ctx
.ptem
;
1694 case POWERPC_MMU_SOFT_74xx
:
1695 env
->exception_index
= POWERPC_EXCP_IFTLB
;
1697 case POWERPC_MMU_SOFT_4xx
:
1698 case POWERPC_MMU_SOFT_4xx_Z
:
1699 env
->exception_index
= POWERPC_EXCP_ITLB
;
1700 env
->error_code
= 0;
1701 env
->spr
[SPR_40x_DEAR
] = address
;
1702 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1704 case POWERPC_MMU_32B
:
1705 case POWERPC_MMU_601
:
1706 #if defined(TARGET_PPC64)
1707 case POWERPC_MMU_620
:
1708 case POWERPC_MMU_64B
:
1709 case POWERPC_MMU_2_06
:
1711 env
->exception_index
= POWERPC_EXCP_ISI
;
1712 env
->error_code
= 0x40000000;
1714 case POWERPC_MMU_BOOKE206
:
1715 booke206_update_mas_tlb_miss(env
, address
, rw
);
1717 case POWERPC_MMU_BOOKE
:
1718 env
->exception_index
= POWERPC_EXCP_ITLB
;
1719 env
->error_code
= 0;
1720 env
->spr
[SPR_BOOKE_DEAR
] = address
;
1722 case POWERPC_MMU_MPC8xx
:
1724 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1726 case POWERPC_MMU_REAL
:
1727 cpu_abort(env
, "PowerPC in real mode should never raise "
1728 "any MMU exceptions\n");
1731 cpu_abort(env
, "Unknown or invalid MMU model\n");
1736 /* Access rights violation */
1737 env
->exception_index
= POWERPC_EXCP_ISI
;
1738 env
->error_code
= 0x08000000;
1741 /* No execute protection violation */
1742 if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1743 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1744 env
->spr
[SPR_BOOKE_ESR
] = 0x00000000;
1746 env
->exception_index
= POWERPC_EXCP_ISI
;
1747 env
->error_code
= 0x10000000;
1750 /* Direct store exception */
1751 /* No code fetch is allowed in direct-store areas */
1752 env
->exception_index
= POWERPC_EXCP_ISI
;
1753 env
->error_code
= 0x10000000;
1755 #if defined(TARGET_PPC64)
1757 /* No match in segment table */
1758 if (env
->mmu_model
== POWERPC_MMU_620
) {
1759 env
->exception_index
= POWERPC_EXCP_ISI
;
1760 /* XXX: this might be incorrect */
1761 env
->error_code
= 0x40000000;
1763 env
->exception_index
= POWERPC_EXCP_ISEG
;
1764 env
->error_code
= 0;
                /* No matches in page tables or TLB */
                switch (env->mmu_model) {
                case POWERPC_MMU_SOFT_6xx:
                    if (rw == 1) {
                        env->exception_index = POWERPC_EXCP_DSTLB;
                        env->error_code = 1 << 16;
                    } else {
                        env->exception_index = POWERPC_EXCP_DLTLB;
                        env->error_code = 0;
                    }
                    env->spr[SPR_DMISS] = address;
                    env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
                tlb_miss:
                    env->error_code |= ctx.key << 19;
                    env->spr[SPR_HASH1] = env->htab_base +
                        get_pteg_offset(env, ctx.hash[0], HASH_PTE_SIZE_32);
                    env->spr[SPR_HASH2] = env->htab_base +
                        get_pteg_offset(env, ctx.hash[1], HASH_PTE_SIZE_32);
                    break;
                case POWERPC_MMU_SOFT_74xx:
                    if (rw == 1) {
                        env->exception_index = POWERPC_EXCP_DSTLB;
                    } else {
                        env->exception_index = POWERPC_EXCP_DLTLB;
                    }
                tlb_miss_74xx:
                    /* Implement LRU algorithm */
                    env->error_code = ctx.key << 19;
                    env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) |
                        ((env->last_way + 1) & (env->nb_ways - 1));
                    env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem;
                    break;
                case POWERPC_MMU_SOFT_4xx:
                case POWERPC_MMU_SOFT_4xx_Z:
                    env->exception_index = POWERPC_EXCP_DTLB;
                    env->error_code = 0;
                    env->spr[SPR_40x_DEAR] = address;
                    if (rw)
                        env->spr[SPR_40x_ESR] = 0x00800000;
                    else
                        env->spr[SPR_40x_ESR] = 0x00000000;
                    break;
                case POWERPC_MMU_32B:
                case POWERPC_MMU_601:
#if defined(TARGET_PPC64)
                case POWERPC_MMU_620:
                case POWERPC_MMU_64B:
                case POWERPC_MMU_2_06:
#endif
                    env->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = address;
                    if (rw == 1)
                        env->spr[SPR_DSISR] = 0x42000000;
                    else
                        env->spr[SPR_DSISR] = 0x40000000;
                    break;
                case POWERPC_MMU_MPC8xx:
                    /* XXX: TODO */
                    cpu_abort(env, "MPC8xx MMU model is not implemented\n");
                    break;
                case POWERPC_MMU_BOOKE206:
                    booke206_update_mas_tlb_miss(env, address, rw);
                    /* fall through */
                case POWERPC_MMU_BOOKE:
                    env->exception_index = POWERPC_EXCP_DTLB;
                    env->error_code = 0;
                    env->spr[SPR_BOOKE_DEAR] = address;
                    env->spr[SPR_BOOKE_ESR] = rw ? 1 << ESR_ST : 0;
                    return -1;
                case POWERPC_MMU_REAL:
                    cpu_abort(env, "PowerPC in real mode should never raise "
                              "any MMU exceptions\n");
                    return -1;
                default:
                    cpu_abort(env, "Unknown or invalid MMU model\n");
                    return -1;
                }
                break;
            case -2:
                /* Access rights violation */
                env->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                if (env->mmu_model == POWERPC_MMU_SOFT_4xx
                    || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) {
                    env->spr[SPR_40x_DEAR] = address;
                    if (rw) {
                        env->spr[SPR_40x_ESR] |= 0x00800000;
                    }
                } else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                           (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->spr[SPR_BOOKE_DEAR] = address;
                    env->spr[SPR_BOOKE_ESR] = rw ? 1 << ESR_ST : 0;
                } else {
                    env->spr[SPR_DAR] = address;
                    if (rw == 1) {
                        env->spr[SPR_DSISR] = 0x0A000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x08000000;
                    }
                }
                break;
            case -4:
                /* Direct store exception */
                switch (access_type) {
                case ACCESS_FLOAT:
                    /* Floating point load/store */
                    env->exception_index = POWERPC_EXCP_ALIGN;
                    env->error_code = POWERPC_EXCP_ALIGN_FP;
                    env->spr[SPR_DAR] = address;
                    break;
                case ACCESS_RES:
                    /* lwarx, ldarx or stwcx. */
                    env->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = address;
                    if (rw == 1)
                        env->spr[SPR_DSISR] = 0x06000000;
                    else
                        env->spr[SPR_DSISR] = 0x04000000;
                    break;
                case ACCESS_EXT:
                    /* eciwx or ecowx */
                    env->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = address;
                    if (rw == 1)
                        env->spr[SPR_DSISR] = 0x06100000;
                    else
                        env->spr[SPR_DSISR] = 0x04100000;
                    break;
                default:
                    printf("DSI: invalid exception (%d)\n", ret);
                    env->exception_index = POWERPC_EXCP_PROGRAM;
                    env->error_code =
                        POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
                    env->spr[SPR_DAR] = address;
                    break;
                }
                break;
#if defined(TARGET_PPC64)
            case -5:
                /* No match in segment table */
                if (env->mmu_model == POWERPC_MMU_620) {
                    env->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = address;
                    /* XXX: this might be incorrect */
                    if (rw == 1)
                        env->spr[SPR_DSISR] = 0x42000000;
                    else
                        env->spr[SPR_DSISR] = 0x40000000;
                } else {
                    env->exception_index = POWERPC_EXCP_DSEG;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = address;
                }
                break;
#endif
            }
        }
        printf("%s: set exception to %d %02x\n", __func__,
               env->exception, env->error_code);
        ret = 1;
    }

    return ret;
}
/*****************************************************************************/
/* BATs management */
#if !defined(FLUSH_ALL_TLBS)
static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
                                     target_ulong mask)
{
    target_ulong base, end, page;

    base = BATu & ~0x0001FFFF;
    end = base + mask + 0x00020000;
    LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
             TARGET_FMT_lx ")\n", base, end, mask);
    for (page = base; page != end; page += TARGET_PAGE_SIZE)
        tlb_flush_page(env, page);
    LOG_BATS("Flush done\n");
}
#endif
static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
                                  target_ulong value)
{
    LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID,
             nr, ul == 0 ? 'u' : 'l', value, env->nip);
}
void ppc_store_ibatu (CPUPPCState *env, int nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'I', 0, nr, value);
    if (env->IBAT[0][nr] != value) {
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#endif
        /* When storing valid upper BAT, mask BEPI and BRPN
         * and invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;
        env->IBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
            (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#endif
    }
}
void ppc_store_ibatl (CPUPPCState *env, int nr, target_ulong value)
{
    dump_store_bat(env, 'I', 1, nr, value);
    env->IBAT[1][nr] = value;
}
void ppc_store_dbatu (CPUPPCState *env, int nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'D', 0, nr, value);
    if (env->DBAT[0][nr] != value) {
        /* When storing valid upper BAT, mask BEPI and BRPN
         * and invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#endif
        mask = (value << 15) & 0x0FFE0000UL;
        env->DBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
            (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#endif
    }
}
void ppc_store_dbatl (CPUPPCState *env, int nr, target_ulong value)
{
    dump_store_bat(env, 'D', 1, nr, value);
    env->DBAT[1][nr] = value;
}
void ppc_store_ibatu_601 (CPUPPCState *env, int nr, target_ulong value)
{
    target_ulong mask;
#if defined(FLUSH_ALL_TLBS)
    int do_inval;
#endif

    dump_store_bat(env, 'I', 0, nr, value);
    if (env->IBAT[0][nr] != value) {
#if defined(FLUSH_ALL_TLBS)
        do_inval = 0;
#endif
        mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
        if (env->IBAT[1][nr] & 0x40) {
            /* Invalidate BAT only if it is valid */
#if !defined(FLUSH_ALL_TLBS)
            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
            do_inval = 1;
#endif
        }
        /* When storing valid upper BAT, mask BEPI and BRPN
         * and invalidate all TLBs covered by this BAT
         */
        env->IBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->DBAT[0][nr] = env->IBAT[0][nr];
        if (env->IBAT[1][nr] & 0x40) {
#if !defined(FLUSH_ALL_TLBS)
            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
            do_inval = 1;
#endif
        }
#if defined(FLUSH_ALL_TLBS)
        if (do_inval)
            tlb_flush(env, 1);
#endif
    }
}
void ppc_store_ibatl_601 (CPUPPCState *env, int nr, target_ulong value)
{
    target_ulong mask;
#if defined(FLUSH_ALL_TLBS)
    int do_inval;
#endif

    dump_store_bat(env, 'I', 1, nr, value);
    if (env->IBAT[1][nr] != value) {
#if defined(FLUSH_ALL_TLBS)
        do_inval = 0;
#endif
        if (env->IBAT[1][nr] & 0x40) {
#if !defined(FLUSH_ALL_TLBS)
            mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
            do_inval = 1;
#endif
        }
        if (value & 0x40) {
#if !defined(FLUSH_ALL_TLBS)
            mask = (value << 17) & 0x0FFE0000UL;
            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
            do_inval = 1;
#endif
        }
        env->IBAT[1][nr] = value;
        env->DBAT[1][nr] = value;
#if defined(FLUSH_ALL_TLBS)
        if (do_inval)
            tlb_flush(env, 1);
#endif
    }
}
/*****************************************************************************/
/* TLB management */
void ppc_tlb_invalidate_all (CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
        ppc6xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_SOFT_4xx_Z:
        ppc4xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_REAL:
        cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env, "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE:
        tlb_flush(env, 1);
        break;
    case POWERPC_MMU_BOOKE206:
        booke206_flush_tlb(env, -1, 0);
        break;
    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
#if defined(TARGET_PPC64)
    case POWERPC_MMU_620:
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_06:
#endif /* defined(TARGET_PPC64) */
        tlb_flush(env, 1);
        break;
    default:
        cpu_abort(env, "Unknown MMU model\n");
        break;
    }
}
void ppc_tlb_invalidate_one (CPUPPCState *env, target_ulong addr)
{
#if !defined(FLUSH_ALL_TLBS)
    addr &= TARGET_PAGE_MASK;
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
        ppc6xx_tlb_invalidate_virt(env, addr, 0);
        if (env->id_tlbs == 1)
            ppc6xx_tlb_invalidate_virt(env, addr, 1);
        break;
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_SOFT_4xx_Z:
        ppc4xx_tlb_invalidate_virt(env, addr, env->spr[SPR_40x_PID]);
        break;
    case POWERPC_MMU_REAL:
        cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env, "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE:
        /* XXX: TODO */
        cpu_abort(env, "BookE MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE206:
        /* XXX: TODO */
        cpu_abort(env, "BookE 2.06 MMU model is not implemented\n");
        break;
    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
        /* tlbie invalidates TLBs for all segments */
        addr &= ~((target_ulong)-1ULL << 28);
        /* XXX: this case should be optimized,
         * giving a mask to tlb_flush_page
         */
        tlb_flush_page(env, addr | (0x0 << 28));
        tlb_flush_page(env, addr | (0x1 << 28));
        tlb_flush_page(env, addr | (0x2 << 28));
        tlb_flush_page(env, addr | (0x3 << 28));
        tlb_flush_page(env, addr | (0x4 << 28));
        tlb_flush_page(env, addr | (0x5 << 28));
        tlb_flush_page(env, addr | (0x6 << 28));
        tlb_flush_page(env, addr | (0x7 << 28));
        tlb_flush_page(env, addr | (0x8 << 28));
        tlb_flush_page(env, addr | (0x9 << 28));
        tlb_flush_page(env, addr | (0xA << 28));
        tlb_flush_page(env, addr | (0xB << 28));
        tlb_flush_page(env, addr | (0xC << 28));
        tlb_flush_page(env, addr | (0xD << 28));
        tlb_flush_page(env, addr | (0xE << 28));
        tlb_flush_page(env, addr | (0xF << 28));
        break;
#if defined(TARGET_PPC64)
    case POWERPC_MMU_620:
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_06:
        /* tlbie invalidates TLBs for all segments */
        /* XXX: given the fact that there are too many segments to invalidate,
         *      and we still don't have a tlb_flush_mask(env, n, mask) in Qemu,
         *      we just invalidate all TLBs
         */
        tlb_flush(env, 1);
        break;
#endif /* defined(TARGET_PPC64) */
    default:
        cpu_abort(env, "Unknown MMU model\n");
        break;
    }
#else
    ppc_tlb_invalidate_all(env);
#endif
}
/*****************************************************************************/
/* Special registers manipulation */
#if defined(TARGET_PPC64)
void ppc_store_asr (CPUPPCState *env, target_ulong value)
{
    if (env->asr != value) {
        env->asr = value;
        tlb_flush(env, 1);
    }
}
#endif

void ppc_store_sdr1 (CPUPPCState *env, target_ulong value)
{
    LOG_MMU("%s: " TARGET_FMT_lx "\n", __func__, value);
    if (env->spr[SPR_SDR1] != value) {
        env->spr[SPR_SDR1] = value;
#if defined(TARGET_PPC64)
        if (env->mmu_model & POWERPC_MMU_64) {
            target_ulong htabsize = value & SDR_64_HTABSIZE;

            if (htabsize > 28) {
                fprintf(stderr, "Invalid HTABSIZE 0x" TARGET_FMT_lx
                        " stored in SDR1\n", htabsize);
                htabsize = 28;
            }
            env->htab_mask = (1ULL << (htabsize + 18)) - 1;
            env->htab_base = value & SDR_64_HTABORG;
        } else
#endif /* defined(TARGET_PPC64) */
        {
            /* FIXME: Should check for valid HTABMASK values */
            env->htab_mask = ((value & SDR_32_HTABMASK) << 16) | 0xFFFF;
            env->htab_base = value & SDR_32_HTABORG;
        }
        tlb_flush(env, 1);
    }
}
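
/* Explanatory note (not in the original source): on 32-bit hash MMUs, SDR1's
 * HTABORG field is the physical base of the hashed page table and HTABMASK
 * selects how many hash bits index a PTE group; on 64-bit MMUs HTABSIZE
 * encodes the table size as a power of two, which is why htab_mask becomes
 * (1 << (HTABSIZE + 18)) - 1 above.
 */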
#if defined(TARGET_PPC64)
target_ulong ppc_load_sr (CPUPPCState *env, int slb_nr)
{
    /* XXX */
    return 0;
}
#endif

void ppc_store_sr (CPUPPCState *env, int srnum, target_ulong value)
{
    LOG_MMU("%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
            srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        uint64_t rb = 0, rs = 0;

        rb |= ((uint32_t)srnum & 0xf) << 28;
        /* Set the valid bit */
        rb |= 1 << 27;
        rb |= (uint32_t)srnum;

        rs |= (value & 0xfffffff) << 12;
        rs |= ((value >> 27) & 0xf) << 8;

        ppc_store_slb(env, rb, rs);
    } else
#endif
    if (env->sr[srnum] != value) {
        env->sr[srnum] = value;
        /* Invalidating 256MB of virtual memory in 4kB pages is way longer
           than flushing the whole TLB. */
#if !defined(FLUSH_ALL_TLBS) && 0
        {
            target_ulong page, end;
            /* Invalidate 256 MB of virtual memory */
            page = (16 << 20) * srnum;
            end = page + (16 << 20);
            for (; page != end; page += TARGET_PAGE_SIZE)
                tlb_flush_page(env, page);
        }
#else
        tlb_flush(env, 1);
#endif
    }
}
#endif /* !defined (CONFIG_USER_ONLY) */
/* GDBstub can read and write MSR... */
void ppc_store_msr (CPUPPCState *env, target_ulong value)
{
    hreg_store_msr(env, value, 0);
}
/*****************************************************************************/
/* Exception processing */
#if defined (CONFIG_USER_ONLY)
void do_interrupt (CPUState *env)
{
    env->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}

void ppc_hw_interrupt (CPUState *env)
{
    env->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}
#else /* defined (CONFIG_USER_ONLY) */
static inline void dump_syscall(CPUState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), env->nip);
}
2349 /* Note that this function should be greatly optimized
2350 * when called with a constant excp, from ppc_hw_interrupt
2352 static inline void powerpc_excp(CPUState
*env
, int excp_model
, int excp
)
2354 target_ulong msr
, new_msr
, vector
;
2355 int srr0
, srr1
, asrr0
, asrr1
;
2356 int lpes0
, lpes1
, lev
;
2359 /* XXX: find a suitable condition to enable the hypervisor mode */
2360 lpes0
= (env
->spr
[SPR_LPCR
] >> 1) & 1;
2361 lpes1
= (env
->spr
[SPR_LPCR
] >> 2) & 1;
2363 /* Those values ensure we won't enter the hypervisor mode */
2368 qemu_log_mask(CPU_LOG_INT
, "Raise exception at " TARGET_FMT_lx
2369 " => %08x (%02x)\n", env
->nip
, excp
, env
->error_code
);
2371 /* new srr1 value excluding must-be-zero bits */
2372 msr
= env
->msr
& ~0x783f0000ULL
;
2374 /* new interrupt handler msr */
2375 new_msr
= env
->msr
& ((target_ulong
)1 << MSR_ME
);
2377 /* target registers */
2384 case POWERPC_EXCP_NONE
:
2385 /* Should never happen */
2387 case POWERPC_EXCP_CRITICAL
: /* Critical input */
2388 switch (excp_model
) {
2389 case POWERPC_EXCP_40x
:
2390 srr0
= SPR_40x_SRR2
;
2391 srr1
= SPR_40x_SRR3
;
2393 case POWERPC_EXCP_BOOKE
:
2394 srr0
= SPR_BOOKE_CSRR0
;
2395 srr1
= SPR_BOOKE_CSRR1
;
2397 case POWERPC_EXCP_G2
:
2403 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
2405 /* Machine check exception is not enabled.
2406 * Enter checkstop state.
2408 if (qemu_log_enabled()) {
2409 qemu_log("Machine check while not allowed. "
2410 "Entering checkstop state\n");
2412 fprintf(stderr
, "Machine check while not allowed. "
2413 "Entering checkstop state\n");
2416 env
->interrupt_request
|= CPU_INTERRUPT_EXITTB
;
2419 /* XXX: find a suitable condition to enable the hypervisor mode */
2420 new_msr
|= (target_ulong
)MSR_HVB
;
2423 /* machine check exceptions don't have ME set */
2424 new_msr
&= ~((target_ulong
)1 << MSR_ME
);
2426 /* XXX: should also have something loaded in DAR / DSISR */
2427 switch (excp_model
) {
2428 case POWERPC_EXCP_40x
:
2429 srr0
= SPR_40x_SRR2
;
2430 srr1
= SPR_40x_SRR3
;
2432 case POWERPC_EXCP_BOOKE
:
2433 srr0
= SPR_BOOKE_MCSRR0
;
2434 srr1
= SPR_BOOKE_MCSRR1
;
2435 asrr0
= SPR_BOOKE_CSRR0
;
2436 asrr1
= SPR_BOOKE_CSRR1
;
    case POWERPC_EXCP_DSI:       /* Data storage exception                   */
        LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx " DAR=" TARGET_FMT_lx
                 "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        if (lpes1 == 0)
            new_msr |= (target_ulong)MSR_HVB;
        goto store_next;
    case POWERPC_EXCP_ISI:       /* Instruction storage exception            */
        LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
                 "\n", msr, env->nip);
        if (lpes1 == 0)
            new_msr |= (target_ulong)MSR_HVB;
        msr |= env->error_code;
        goto store_next;
    case POWERPC_EXCP_EXTERNAL:  /* External input                           */
        if (lpes0 == 1)
            new_msr |= (target_ulong)MSR_HVB;
        goto store_next;
    case POWERPC_EXCP_ALIGN:     /* Alignment exception                      */
        if (lpes1 == 0)
            new_msr |= (target_ulong)MSR_HVB;
        /* XXX: this is false */
        /* Get rS/rD and rA from faulting opcode */
        env->spr[SPR_DSISR] |= (ldl_code((env->nip - 4)) & 0x03FF0000) >> 16;
        goto store_current;
    case POWERPC_EXCP_PROGRAM:   /* Program exception                        */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                LOG_EXCP("Ignore floating point exception\n");
                env->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }
            if (lpes1 == 0)
                new_msr |= (target_ulong)MSR_HVB;
            msr |= 0x00100000;
            if (msr_fe0 == msr_fe1)
                goto store_next;
            msr |= 0x00010000;
            break;
        case POWERPC_EXCP_INVAL:
            LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
            if (lpes1 == 0)
                new_msr |= (target_ulong)MSR_HVB;
            msr |= 0x00080000;
            break;
        case POWERPC_EXCP_PRIV:
            if (lpes1 == 0)
                new_msr |= (target_ulong)MSR_HVB;
            msr |= 0x00040000;
            break;
        case POWERPC_EXCP_TRAP:
            if (lpes1 == 0)
                new_msr |= (target_ulong)MSR_HVB;
            msr |= 0x00020000;
            break;
        default:
            /* Should never occur */
            cpu_abort(env, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        goto store_current;
    case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception     */
        if (lpes1 == 0)
            new_msr |= (target_ulong)MSR_HVB;
        goto store_current;
    case POWERPC_EXCP_SYSCALL:   /* System call exception                    */
        dump_syscall(env);
        lev = env->error_code;
        if ((lev == 1) && cpu_ppc_hypercall) {
            cpu_ppc_hypercall(env);
            return;
        }
        if (lev == 1 || (lpes0 == 0 && lpes1 == 0))
            new_msr |= (target_ulong)MSR_HVB;
        goto store_next;
    case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable          */
        goto store_current;
    case POWERPC_EXCP_DECR:      /* Decrementer exception                    */
        if (lpes1 == 0)
            new_msr |= (target_ulong)MSR_HVB;
        goto store_next;
    case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt           */
        LOG_EXCP("FIT exception\n");
        goto store_next;
    case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt                 */
        LOG_EXCP("WDT exception\n");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        goto store_next;
    case POWERPC_EXCP_DTLB:      /* Data TLB error                           */
        goto store_next;
    case POWERPC_EXCP_ITLB:      /* Instruction TLB error                    */
        goto store_next;
    case POWERPC_EXCP_DEBUG:     /* Debug interrupt                          */
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        /* XXX: TODO */
        cpu_abort(env, "Debug exception is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_SPEU:      /* SPE/embedded floating-point unavailable  */
        goto store_current;
    case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt   */
        /* XXX: TODO */
        cpu_abort(env, "Embedded floating point data exception "
                  "is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt  */
        /* XXX: TODO */
        cpu_abort(env, "Embedded floating point round exception "
                  "is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt   */
        /* XXX: TODO */
        cpu_abort(env,
                  "Performance counter exception is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt              */
        /* XXX: TODO */
        cpu_abort(env,
                  "Embedded doorbell interrupt is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt     */
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        /* XXX: TODO */
        cpu_abort(env, "Embedded doorbell critical interrupt "
                  "is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_RESET:     /* System reset exception                   */
        if (msr_pow) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
        } else {
            new_msr &= ~((target_ulong)1 << MSR_ME);
        }
        if (0) {
            /* XXX: find a suitable condition to enable the hypervisor mode */
            new_msr |= (target_ulong)MSR_HVB;
        }
        goto store_next;
    case POWERPC_EXCP_DSEG:      /* Data segment exception                   */
        if (lpes1 == 0)
            new_msr |= (target_ulong)MSR_HVB;
        goto store_next;
    case POWERPC_EXCP_ISEG:      /* Instruction segment exception            */
        if (lpes1 == 0)
            new_msr |= (target_ulong)MSR_HVB;
        goto store_next;
    case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception         */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        goto store_next;
    case POWERPC_EXCP_TRACE:     /* Trace exception                          */
        if (lpes1 == 0)
            new_msr |= (target_ulong)MSR_HVB;
        goto store_next;
    case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception        */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        goto store_next;
    case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        goto store_next;
    case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception        */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        goto store_next;
    case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        goto store_next;
    case POWERPC_EXCP_VPU:       /* Vector unavailable exception             */
        if (lpes1 == 0)
            new_msr |= (target_ulong)MSR_HVB;
        goto store_current;
    case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt    */
        LOG_EXCP("PIT exception\n");
        goto store_next;
    case POWERPC_EXCP_IO:        /* IO error exception                       */
        /* XXX: TODO */
        cpu_abort(env, "601 IO error exception is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_RUNM:      /* Run mode exception                       */
        /* XXX: TODO */
        cpu_abort(env, "601 run mode exception is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_EMUL:      /* Emulation trap exception                 */
        /* XXX: TODO */
        cpu_abort(env, "602 emulation trap exception "
                  "is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error              */
        if (lpes1 == 0) /* XXX: check this */
            new_msr |= (target_ulong)MSR_HVB;
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(env, "Invalid instruction TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DLTLB:     /* Data load TLB miss                       */
        if (lpes1 == 0) /* XXX: check this */
            new_msr |= (target_ulong)MSR_HVB;
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            goto tlb_miss_tgpr;
        case POWERPC_EXCP_7x5:
            goto tlb_miss;
        case POWERPC_EXCP_74xx:
            goto tlb_miss_74xx;
        default:
            cpu_abort(env, "Invalid data load TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_DSTLB:     /* Data store TLB miss                      */
        if (lpes1 == 0) /* XXX: check this */
            new_msr |= (target_ulong)MSR_HVB;
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
        tlb_miss_tgpr:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            goto tlb_miss;
        case POWERPC_EXCP_7x5:
        tlb_miss:
#if defined (DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;
                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_IMISS];
                    cmp = &env->spr[SPR_ICMP];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB)
                        es = "DL";
                    else
                        es = "DS";
                    en = 'D';
                    miss = &env->spr[SPR_DMISS];
                    cmp = &env->spr[SPR_DCMP];
                }
                qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->spr[SPR_HASH1], env->spr[SPR_HASH2],
                         env->error_code);
            }
#endif
            msr |= env->crf[0] << 28;
            msr |= env->error_code; /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
        case POWERPC_EXCP_74xx:
        tlb_miss_74xx:
#if defined (DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;
                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB)
                        es = "DL";
                    else
                        es = "DS";
                    en = 'D';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                }
                qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->error_code);
            }
#endif
            msr |= env->error_code; /* key bit */
            break;
        default:
            cpu_abort(env, "Invalid data store TLB miss exception\n");
            break;
        }
        goto store_next;
    case POWERPC_EXCP_FPA:       /* Floating-point assist exception          */
        /* XXX: TODO */
        cpu_abort(env, "Floating point assist exception "
                  "is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_DABR:      /* Data address breakpoint                  */
        /* XXX: TODO */
        cpu_abort(env, "DABR exception is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_IABR:      /* Instruction address breakpoint           */
        /* XXX: TODO */
        cpu_abort(env, "IABR exception is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_SMI:       /* System management interrupt              */
        /* XXX: TODO */
        cpu_abort(env, "SMI exception is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_THERM:     /* Thermal interrupt                        */
        /* XXX: TODO */
        cpu_abort(env, "Thermal management exception "
                  "is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt   */
        if (lpes1 == 0)
            new_msr |= (target_ulong)MSR_HVB;
        /* XXX: TODO */
        cpu_abort(env,
                  "Performance counter exception is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_VPUA:      /* Vector assist exception                  */
        /* XXX: TODO */
        cpu_abort(env, "VPU assist exception is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_SOFTP:     /* Soft patch exception                     */
        /* XXX: TODO */
        cpu_abort(env,
                  "970 soft-patch exception is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_MAINT:     /* Maintenance exception                    */
        /* XXX: TODO */
        cpu_abort(env,
                  "970 maintenance exception is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint             */
        /* XXX: TODO */
        cpu_abort(env, "Maskable external exception "
                  "is not implemented yet !\n");
        goto store_next;
    case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint         */
        /* XXX: TODO */
        cpu_abort(env, "Non maskable external exception "
                  "is not implemented yet !\n");
        goto store_next;
    default:
    excp_invalid:
        cpu_abort(env, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    store_current:
        /* save current instruction location */
        env->spr[srr0] = env->nip - 4;
        break;
    store_next:
        /* save next instruction location */
        env->spr[srr0] = env->nip;
        break;
    }
    /* Save MSR */
    env->spr[srr1] = msr;
    /* If any alternate SRR registers are defined, duplicate saved values */
    if (asrr0 != -1)
        env->spr[asrr0] = env->spr[srr0];
    if (asrr1 != -1)
        env->spr[asrr1] = env->spr[srr1];
    /* If we deactivated any translation, flush TLBs */
    if (new_msr & ((1 << MSR_IR) | (1 << MSR_DR)))
        tlb_flush(env, 1);

    if (msr_ile)
        new_msr |= (target_ulong)1 << MSR_LE;

    /* Jump to handler */
    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(env, "Raised an exception without defined vector %d\n",
                  excp);
    }
    vector |= env->excp_prefix;
#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (!msr_cm) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_CM;
        }
    } else {
        if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif
    /* XXX: we don't use hreg_store_msr here as we have already treated
     * any special case that could occur. Just store MSR and update hflags
     */
    env->msr = new_msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    env->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;

    if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
        (env->mmu_model == POWERPC_MMU_BOOKE206)) {
        /* XXX: The BookE changes address space when switching modes,
           we should probably implement that as different MMU indexes,
           but for the moment we do it the slow way and flush all.  */
        tlb_flush(env, 1);
    }
}
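
/* Worked example (editorial sketch, not from the original source): for a
 * classic 32-bit CPU taking a DSI with excp_prefix 0x00000000, the code
 * above performs roughly this guest-visible state change:
 *
 *     SRR0 <- env->nip                       // the store_next path
 *     SRR1 <- old MSR with the must-be-zero bits 0x783f0000 cleared
 *     MSR  <- cleared apart from MSR[ME] (so IR/DR/EE go off)
 *     NIP  <- 0x00000300                     // excp_vectors[DSI] | prefix
 *
 * The 0x00000300 value is the architected DSI vector offset and is only
 * illustrative here; the real value always comes from env->excp_vectors[].
 */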

void do_interrupt (CPUState *env)
{
    powerpc_excp(env, env->excp_model, env->exception_index);
}

void ppc_hw_interrupt (CPUPPCState *env)
{
    int hdice;

#if 0
    qemu_log_mask(CPU_LOG_INT, "%s: %p pending %08x req %08x me %d ee %d\n",
                  __func__, env, env->pending_interrupts,
                  env->interrupt_request, (int)msr_me, (int)msr_ee);
#endif
    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_MCHECK);
        return;
    }
#if 0 /* TODO */
    /* External debug exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
        powerpc_excp(env, env->excp_model, POWERPC_EXCP_DEBUG);
        return;
    }
#endif
    if (0) {
        /* XXX: find a suitable condition to enable the hypervisor mode */
        hdice = env->spr[SPR_LPCR] & 1;
    } else {
        hdice = 0;
    }
    if ((msr_ee != 0 || msr_hv == 0 || msr_pr != 0) && hdice != 0) {
        /* Hypervisor decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
            powerpc_excp(env, env->excp_model, POWERPC_EXCP_HDECR);
            return;
        }
    }
    if (msr_ce != 0) {
        /* External critical interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
            /* Taking a critical external interrupt does not clear the external
             * critical interrupt status
             */
#if 0
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CEXT);
#endif
            powerpc_excp(env, env->excp_model, POWERPC_EXCP_CRITICAL);
            return;
        }
    }
    if (msr_ee != 0) {
        /* Watchdog timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
            powerpc_excp(env, env->excp_model, POWERPC_EXCP_WDT);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
            powerpc_excp(env, env->excp_model, POWERPC_EXCP_DOORCI);
            return;
        }
        /* Fixed interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
            powerpc_excp(env, env->excp_model, POWERPC_EXCP_FIT);
            return;
        }
        /* Programmable interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
            powerpc_excp(env, env->excp_model, POWERPC_EXCP_PIT);
            return;
        }
        /* Decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
            powerpc_excp(env, env->excp_model, POWERPC_EXCP_DECR);
            return;
        }
        /* External interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
            /* Taking an external interrupt does not clear the external
             * interrupt status
             */
#if 0
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_EXT);
#endif
            powerpc_excp(env, env->excp_model, POWERPC_EXCP_EXTERNAL);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
            powerpc_excp(env, env->excp_model, POWERPC_EXCP_DOORI);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
            powerpc_excp(env, env->excp_model, POWERPC_EXCP_PERFM);
            return;
        }
        /* Thermal interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
            powerpc_excp(env, env->excp_model, POWERPC_EXCP_THERM);
            return;
        }
    }
}
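
/* Editorial note (not from the original source): the chain of tests above
 * encodes a fixed priority order, highest first: reset, machine check,
 * the (guarded) hypervisor decrementer, critical input under MSR[CE], and
 * then, with MSR[EE] set, the watchdog, doorbell, timer, decrementer,
 * external, performance monitor and thermal sources. Each taken interrupt
 * returns immediately, so at most one exception is delivered per call.
 */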
#endif /* !CONFIG_USER_ONLY */

void cpu_dump_rfi (target_ulong RA, target_ulong msr)
{
    qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
             TARGET_FMT_lx "\n", RA, msr);
}

void cpu_reset(CPUPPCState *env)
{
    target_ulong msr;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    msr = (target_ulong)0;
    if (0) {
        /* XXX: find a suitable condition to enable the hypervisor mode */
        msr |= (target_ulong)MSR_HVB;
    }
    msr |= (target_ulong)0 << MSR_AP; /* TO BE CHECKED */
    msr |= (target_ulong)0 << MSR_SA; /* TO BE CHECKED */
    msr |= (target_ulong)1 << MSR_EP;
#if defined (DO_SINGLE_STEP) && 0
    /* Single step trace mode */
    msr |= (target_ulong)1 << MSR_SE;
    msr |= (target_ulong)1 << MSR_BE;
#endif
#if defined(CONFIG_USER_ONLY)
    msr |= (target_ulong)1 << MSR_FP; /* Allow floating point usage */
    msr |= (target_ulong)1 << MSR_VR; /* Allow altivec usage */
    msr |= (target_ulong)1 << MSR_SPE; /* Allow SPE usage */
    msr |= (target_ulong)1 << MSR_PR;
#else
    env->excp_prefix = env->hreset_excp_prefix;
    env->nip = env->hreset_vector | env->excp_prefix;
    if (env->mmu_model != POWERPC_MMU_REAL)
        ppc_tlb_invalidate_all(env);
#endif
    env->msr = msr & env->msr_mask;
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64)
        env->msr |= (1ULL << MSR_SF);
#endif
    hreg_compute_hflags(env);
    env->reserve_addr = (target_ulong)-1ULL;
    /* Be sure no exception or interrupt is pending */
    env->pending_interrupts = 0;
    env->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
    /* Flush all TLBs */
    tlb_flush(env, 1);
}
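
/* Worked example (editorial, not from the original source): in a
 * CONFIG_USER_ONLY build the |= statements above compose
 *
 *     msr = ((target_ulong)1 << MSR_EP) | ((target_ulong)1 << MSR_FP)
 *         | ((target_ulong)1 << MSR_VR) | ((target_ulong)1 << MSR_SPE)
 *         | ((target_ulong)1 << MSR_PR);
 *
 * i.e. exception prefix, FP/AltiVec/SPE usable and problem state set,
 * before env->msr_mask strips the bits the selected CPU model does not
 * implement.
 */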

CPUPPCState *cpu_ppc_init (const char *cpu_model)
{
    CPUPPCState *env;
    const ppc_def_t *def;

    def = cpu_ppc_find_by_name(cpu_model);
    if (!def)
        return NULL;

    env = g_malloc0(sizeof(CPUPPCState));
    cpu_exec_init(env);
    if (tcg_enabled()) {
        ppc_translate_init();
    }
    env->cpu_model_str = cpu_model;
    cpu_ppc_register_internal(env, def);

    qemu_init_vcpu(env);

    return env;
}
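
/* Usage sketch (illustrative, not from the original source): board code
 * typically creates a CPU by model name and resets it before use; the
 * model string below is only an example of what cpu_ppc_find_by_name()
 * may accept.
 *
 *     CPUPPCState *env = cpu_ppc_init("G3");   // example model name
 *     if (env == NULL) {
 *         hw_error("Unable to find PowerPC CPU definition\n");
 *     }
 *     cpu_reset(env);
 */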

void cpu_ppc_close (CPUPPCState *env)
{
    /* Should also remove all opcode tables... */
    g_free(env);
}