/*
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "trace.h"
/* Sparc MMU emulation */

#if defined(CONFIG_USER_ONLY)

int cpu_sparc_handle_mmu_fault(CPUState *env1, target_ulong address, int rw,
                               int mmu_idx)
{
    if (rw & 2) {
        env1->exception_index = TT_TFAULT;
    } else {
        env1->exception_index = TT_DFAULT;
    }
    return 1;
}

#else

#ifndef TARGET_SPARC64
/*
 * Sparc V8 Reference MMU (SRMMU)
 */
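
/* access_table[access_index][PTE.ACC]: the row is the access-type index
   built in get_physical_address() below (bit 2: store, bit 1: instruction
   fetch, bit 0: supervisor), the column is the ACC permission field of the
   matching PTE.  Zero means the access is allowed; the non-zero values
   appear to be SRMMU fault-type codes already shifted into fault status
   register position (8: protection error, 12: privilege violation). */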
static const int access_table[8][8] = {
    { 0, 0, 0, 0, 8, 0, 12, 12 },
    { 0, 0, 0, 0, 8, 0, 0, 0 },
    { 8, 8, 0, 0, 0, 8, 12, 12 },
    { 8, 8, 0, 0, 0, 8, 0, 0 },
    { 8, 0, 8, 0, 8, 8, 12, 12 },
    { 8, 0, 8, 0, 8, 0, 8, 0 },
    { 8, 8, 8, 0, 8, 8, 12, 12 },
    { 8, 8, 8, 0, 8, 8, 8, 0 }
};
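
/* QEMU page protection bits granted for each PTE ACC value; row 0 is used
   for supervisor accesses, row 1 for user accesses (ACC values 6 and 7
   grant the user no access at all). */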
static const int perm_table[2][8] = {
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC
    },
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ,
        0,
        0,
    }
};

static int get_physical_address(CPUState *env, target_phys_addr_t *physical,
                                int *prot, int *access_index,
                                target_ulong address, int rw, int mmu_idx,
                                target_ulong *page_size)
{
    int access_perms = 0;
    target_phys_addr_t pde_ptr;
    uint32_t pde;
    int error_code = 0, is_dirty, is_user;
    unsigned long page_offset;

    is_user = mmu_idx == MMU_USER_IDX;

    if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
        *page_size = TARGET_PAGE_SIZE;
        /* Boot mode: instruction fetches are taken from PROM */
        if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) {
            *physical = env->prom_addr | (address & 0x7ffffULL);
            *prot = PAGE_READ | PAGE_EXEC;
        } else {
            *physical = address;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        }
        return 0;
    }
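
    /* With the MMU enabled, classify the access for the permission check:
       bit 2 is set for stores, bit 1 for instruction fetches, bit 0 for
       supervisor accesses; the result indexes a row of access_table. */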
    *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1);
    *physical = 0xffffffffffff0000ULL;

    /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
    /* Context base + context number */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(pde_ptr);

    /* Ctx pde */
    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
        return 1 << 2;
    case 2: /* L0 PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 4 << 2;
    case 1: /* L0 PDE */
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
            return (1 << 8) | (1 << 2);
        case 3: /* Reserved */
            return (1 << 8) | (4 << 2);
        case 1: /* L1 PDE */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
                return (2 << 8) | (1 << 2);
            case 3: /* Reserved */
                return (2 << 8) | (4 << 2);
            case 1: /* L2 PDE */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                    return (3 << 8) | (1 << 2);
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return (3 << 8) | (4 << 2);
                case 2: /* L3 PTE */
                    page_offset = (address & TARGET_PAGE_MASK) &
                        (TARGET_PAGE_SIZE - 1);
                }
                *page_size = TARGET_PAGE_SIZE;
                break;
            case 2: /* L2 PTE */
                page_offset = address & 0x3ffff;
                *page_size = 0x40000;
            }
            break;
        case 2: /* L1 PTE */
            page_offset = address & 0xffffff;
            *page_size = 0x1000000;
        }
    }
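
    /* pde now holds the matching page table entry; page_offset and
       page_size reflect the level of the hit: 4KB pages at L3, 256KB
       (0x40000) at L2, 16MB (0x1000000) at L1. */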

    /* check access */
    access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
    error_code = access_table[*access_index][access_perms];
    if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) {
        return error_code;
    }

    /* update page modified and dirty bits */
    is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
    if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
        pde |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pde |= PG_MODIFIED_MASK;
        }
        stl_phys_notdirty(pde_ptr, pde);
    }

    /* the page can be put in the TLB */
    *prot = perm_table[is_user][access_perms];
    if (!(pde & PG_MODIFIED_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        *prot &= ~PAGE_WRITE;
    }

    /* Even if large ptes, we map only one 4KB page in the cache to
       avoid filling it too fast */
    *physical = ((target_phys_addr_t)(pde & PTE_ADDR_MASK) << 4) + page_offset;
    return error_code;
}
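
/* rw encodes the access type throughout this file: 0 = data load,
   1 = data store, 2 = instruction fetch.  A zero return from the fault
   handler means a valid mapping was entered into the TLB; a non-zero
   return means a fault was signalled via exception_index. */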
/* Perform address translation */
int cpu_sparc_handle_mmu_fault(CPUState *env, target_ulong address, int rw,
                               int mmu_idx)
{
    target_phys_addr_t paddr;
    target_ulong vaddr;
    target_ulong page_size;
    int error_code = 0, prot, access_index;

    error_code = get_physical_address(env, &paddr, &prot, &access_index,
                                      address, rw, mmu_idx, &page_size);
    if (error_code == 0) {
        vaddr = address & TARGET_PAGE_MASK;
        paddr &= TARGET_PAGE_MASK;
#ifdef DEBUG_MMU
        printf("Translate at " TARGET_FMT_lx " -> " TARGET_FMT_plx ", vaddr "
               TARGET_FMT_lx "\n", address, paddr, vaddr);
#endif
        tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
        return 0;
    }

    if (env->mmuregs[3]) { /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    }
    env->mmuregs[3] |= (access_index << 5) | error_code | 2;
    env->mmuregs[4] = address; /* Fault address register */

    if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) {
        /* No fault mode: if a mapping is available, just override
           permissions. If no mapping is available, redirect accesses to
           neverland. Fake/overridden mappings will be flushed when
           switching to normal mode. */
        vaddr = address & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        tlb_set_page(env, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
        return 0;
    } else {
        if (rw & 2) {
            env->exception_index = TT_TFAULT;
        } else {
            env->exception_index = TT_DFAULT;
        }
        return 1;
    }
}
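
/* Table-walk helper backing the SRMMU probe operation: mmulev selects how
   deep to walk (3 stops at the context table entry, 2 at level 1, 1 at
   level 2, 0 at the level 3 PTE); returns the entry found, or 0 when an
   invalid or reserved entry is hit. */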
target_ulong mmu_probe(CPUState *env, target_ulong address, int mmulev)
{
    target_phys_addr_t pde_ptr;
    uint32_t pde;

    /* Context base + context number */
    pde_ptr = (target_phys_addr_t)(env->mmuregs[1] << 4) +
        (env->mmuregs[2] << 2);
    pde = ldl_phys(pde_ptr);

    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
    case 2: /* PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 0;
    case 1: /* L0 PDE */
        if (mmulev == 3) {
            return pde;
        }
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
        case 3: /* Reserved */
            return 0;
        case 2: /* L1 PTE */
            return pde;
        case 1: /* L1 PDE */
            if (mmulev == 2) {
                return pde;
            }
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
            case 3: /* Reserved */
                return 0;
            case 2: /* L2 PTE */
                return pde;
            case 1: /* L2 PDE */
                if (mmulev == 1) {
                    return pde;
                }
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 0;
                case 2: /* L3 PTE */
                    return pde;
                }
            }
        }
    }
    return 0;
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUState *env)
{
    target_ulong va, va1, va2;
    unsigned int n, m, o;
    target_phys_addr_t pde_ptr, pa;
    uint32_t pde;

    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(pde_ptr);
    (*cpu_fprintf)(f, "Root ptr: " TARGET_FMT_plx ", ctx: %d\n",
                   (target_phys_addr_t)env->mmuregs[1] << 4, env->mmuregs[2]);
    for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
        pde = mmu_probe(env, va, 2);
        if (pde) {
            pa = cpu_get_phys_page_debug(env, va);
            (*cpu_fprintf)(f, "VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx
                           " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
            for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
                pde = mmu_probe(env, va1, 1);
                if (pde) {
                    pa = cpu_get_phys_page_debug(env, va1);
                    (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: "
                                   TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n",
                                   va1, pa, pde);
                    for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
                        pde = mmu_probe(env, va2, 0);
                        if (pde) {
                            pa = cpu_get_phys_page_debug(env, va2);
                            (*cpu_fprintf)(f, "  VA: " TARGET_FMT_lx ", PA: "
                                           TARGET_FMT_plx " PTE: "
                                           TARGET_FMT_lx "\n",
                                           va2, pa, pde);
                        }
                    }
                }
            }
        }
    }
}

/* GDB expects all register windows to be flushed to RAM. This function
 * handles reads (and only reads) in stack frames as if windows were
 * flushed. We assume that the sparc ABI is followed.
 */
int target_memory_rw_debug(CPUState *env, target_ulong addr,
                           uint8_t *buf, int len, int is_write)
{
    int i;
    int len1;
    int cwp = env->cwp;

    if (!is_write) {
        for (i = 0; i < env->nwindows; i++) {
            int off;
            target_ulong fp = env->regbase[cwp * 16 + 22]; /* i6 of window */

            /* Assume fp == 0 means end of frame. */
            if (fp == 0) {
                break;
            }

            cwp = cpu_cwp_inc(env, cwp + 1);

            /* Invalid window ? */
            if (env->wim & (1 << cwp)) {
                break;
            }

            /* According to the ABI, the stack is growing downward. */
            if (addr + len < fp) {
                break;
            }

            /* Not in this frame. */
            if (addr > fp + 64) {
                continue;
            }

            /* Handle access before this window. */
            if (addr < fp) {
                len1 = fp - addr;
                if (cpu_memory_rw_debug(env, addr, buf, len1, is_write) != 0) {
                    return -1;
                }
                addr += len1;
                len -= len1;
                buf += len1;
            }

            /* Access byte per byte to registers. Not very efficient but speed
               is not critical.  The 64-byte save area at fp holds the window's
               8 locals followed by its 8 ins (regbase offset 8 is l0). */
            off = addr - fp;
            len1 = 64 - off;

            if (len1 > len) {
                len1 = len;
            }

            for (; len1; len1--) {
                int reg = cwp * 16 + 8 + (off >> 2);
                union {
                    uint32_t v;
                    uint8_t c[4];
                } u;
                u.v = cpu_to_be32(env->regbase[reg]);
                *buf++ = u.c[off & 3];
                addr++;
                len--;
                off++;
            }

            if (len == 0) {
                return 0;
            }
        }
    }
    return cpu_memory_rw_debug(env, addr, buf, len, is_write);
}

#else /* !TARGET_SPARC64 */

/* 41 bit physical address space */
static inline target_phys_addr_t ultrasparc_truncate_physical(uint64_t x)
{
    return x & 0x1ffffffffffULL;
}

/*
 * UltraSparc IIi I/DMMUs
 */

/* Returns true if the TTE tag is valid and matches the virtual address in
   the given context; the comparison mask is derived from the TTE page
   size.  On a match, the physical address is also decoded. */
static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
                                       uint64_t address, uint64_t context,
                                       target_phys_addr_t *physical)
{
    uint64_t mask;

    switch (TTE_PGSIZE(tlb->tte)) {
    default:
    case 0x0: /* 8k */
        mask = 0xffffffffffffe000ULL;
        break;
    case 0x1: /* 64k */
        mask = 0xffffffffffff0000ULL;
        break;
    case 0x2: /* 512k */
        mask = 0xfffffffffff80000ULL;
        break;
    case 0x3: /* 4M */
        mask = 0xffffffffffc00000ULL;
        break;
    }

    /* valid, context match, virtual address match? */
    if (TTE_IS_VALID(tlb->tte) &&
        (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
        && compare_masked(address, tlb->tag, mask)) {
        /* decode physical address */
        *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
        return 1;
    }

    return 0;
}
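
/* Data access translation through the 64-entry D-TLB.  rw is 0 for loads,
   1 for stores and 4 for no-fault accesses (see cpu_get_phys_page_nofault
   below).  Access faults update the D-MMU SFSR and SFAR; a plain TLB miss
   only updates the tag access register. */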
static int get_physical_address_data(CPUState *env,
                                     target_phys_addr_t *physical, int *prot,
                                     target_ulong address, int rw, int mmu_idx)
{
    unsigned int i;
    uint64_t context;
    uint64_t sfsr = 0;
    int is_user = (mmu_idx == MMU_USER_IDX ||
                   mmu_idx == MMU_USER_SECONDARY_IDX);

    if ((env->lsu & DMMU_E) == 0) { /* DMMU disabled */
        *physical = ultrasparc_truncate_physical(address);
        *prot = PAGE_READ | PAGE_WRITE;
        return 0;
    }

    switch (mmu_idx) {
    case MMU_USER_IDX:
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        sfsr |= SFSR_CT_PRIMARY;
        break;
    case MMU_USER_SECONDARY_IDX:
    case MMU_KERNEL_SECONDARY_IDX:
        context = env->dmmu.mmu_secondary_context & 0x1fff;
        sfsr |= SFSR_CT_SECONDARY;
        break;
    case MMU_NUCLEUS_IDX:
        sfsr |= SFSR_CT_NUCLEUS;
        /* FALLTHRU */
    default:
        context = 0;
        break;
    }

    if (rw == 1) {
        sfsr |= SFSR_WRITE_BIT;
    } else if (rw == 4) {
        sfsr |= SFSR_NF_BIT;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
            int do_fault = 0;

            /* access ok? */
            /* multiple bits in SFSR.FT may be set on TT_DFAULT */
            if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
                do_fault = 1;
                sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */
                trace_mmu_helper_dfault(address, context, mmu_idx, env->tl);
            }
            if (rw == 4) {
                if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NF_E_BIT;
                }
            } else {
                if (TTE_IS_NFO(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NFO_BIT;
                }
            }

            if (do_fault) {
                /* faults above are reported with TT_DFAULT. */
                env->exception_index = TT_DFAULT;
            } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
                do_fault = 1;
                env->exception_index = TT_DPROT;

                trace_mmu_helper_dprot(address, context, mmu_idx, env->tl);
            }

            if (!do_fault) {
                *prot = PAGE_READ;
                if (TTE_IS_W_OK(env->dtlb[i].tte)) {
                    *prot |= PAGE_WRITE;
                }

                TTE_SET_USED(env->dtlb[i].tte);

                return 0;
            }

            if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
                sfsr |= SFSR_OW_BIT; /* overflow (not read before
                                        another fault) */
            }

            if (env->pstate & PS_PRIV) {
                sfsr |= SFSR_PR_BIT;
            }

            /* FIXME: ASI field in SFSR must be set */
            env->dmmu.sfsr = sfsr | SFSR_VALID_BIT;

            env->dmmu.sfar = address; /* Fault address register */

            env->dmmu.tag_access = (address & ~0x1fffULL) | context;

            return 1;
        }
    }

    trace_mmu_helper_dmiss(address, context);

    /*
     * On MMU misses:
     * - UltraSPARC IIi: SFSR and SFAR unmodified
     * - JPS1: SFAR updated and some fields of SFSR updated
     */
    env->dmmu.tag_access = (address & ~0x1fffULL) | context;
    env->exception_index = TT_DMISS;
    return 1;
}
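
/* Instruction fetch translation through the 64-entry I-TLB.  While the
   I-MMU is disabled or the CPU is in RED state, fetches bypass
   translation and use the physical address directly. */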
static int get_physical_address_code(CPUState *env,
                                     target_phys_addr_t *physical, int *prot,
                                     target_ulong address, int mmu_idx)
{
    unsigned int i;
    uint64_t context;
    int is_user = (mmu_idx == MMU_USER_IDX ||
                   mmu_idx == MMU_USER_SECONDARY_IDX);

    if ((env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0) {
        /* IMMU disabled */
        *physical = ultrasparc_truncate_physical(address);
        *prot = PAGE_EXEC;
        return 0;
    }

    if (env->tl == 0) {
        /* PRIMARY context */
        context = env->dmmu.mmu_primary_context & 0x1fff;
    } else {
        /* NUCLEUS context */
        context = 0;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->itlb[i],
                                 address, context, physical)) {
            /* access ok? */
            if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
                /* Fault status register */
                if (env->immu.sfsr & SFSR_VALID_BIT) {
                    env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
                                                     another fault) */
                } else {
                    env->immu.sfsr = 0;
                }
                if (env->pstate & PS_PRIV) {
                    env->immu.sfsr |= SFSR_PR_BIT;
                }
                if (env->tl > 0) {
                    env->immu.sfsr |= SFSR_CT_NUCLEUS;
                }

                /* FIXME: ASI field in SFSR must be set */
                env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
                env->exception_index = TT_TFAULT;

                env->immu.tag_access = (address & ~0x1fffULL) | context;

                trace_mmu_helper_tfault(address, context);

                return 1;
            }
            *prot = PAGE_EXEC;
            TTE_SET_USED(env->itlb[i].tte);
            return 0;
        }
    }

    trace_mmu_helper_tmiss(address, context);

    /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */
    env->immu.tag_access = (address & ~0x1fffULL) | context;
    env->exception_index = TT_TMISS;
    return 1;
}
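
/* rw == 2 denotes an instruction fetch and is routed to the I-MMU;
   every other access type goes through the D-MMU. */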
static int get_physical_address(CPUState *env, target_phys_addr_t *physical,
                                int *prot, int *access_index,
                                target_ulong address, int rw, int mmu_idx,
                                target_ulong *page_size)
{
    /* ??? We treat everything as a small page, then explicitly flush
       everything when an entry is evicted. */
    *page_size = TARGET_PAGE_SIZE;

    /* safety net to catch wrong softmmu index use from dynamic code */
    if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) {
        if (rw == 2) {
            trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        } else {
            trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        }
    }

    if (rw == 2) {
        return get_physical_address_code(env, physical, prot, address,
                                         mmu_idx);
    } else {
        return get_physical_address_data(env, physical, prot, address, rw,
                                         mmu_idx);
    }
}

/* Perform address translation */
int cpu_sparc_handle_mmu_fault(CPUState *env, target_ulong address, int rw,
                               int mmu_idx)
{
    target_ulong virt_addr, vaddr;
    target_phys_addr_t paddr;
    target_ulong page_size;
    int error_code = 0, prot, access_index;

    error_code = get_physical_address(env, &paddr, &prot, &access_index,
                                      address, rw, mmu_idx, &page_size);
    if (error_code == 0) {
        virt_addr = address & TARGET_PAGE_MASK;
        vaddr = virt_addr + ((address & TARGET_PAGE_MASK) &
                             (TARGET_PAGE_SIZE - 1));

        trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl,
                                   env->dmmu.mmu_primary_context,
                                   env->dmmu.mmu_secondary_context);

        tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
        return 0;
    }
    return 1;
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUState *env)
{
    unsigned int i;
    const char *mask;

    (*cpu_fprintf)(f, "MMU contexts: Primary: %" PRId64 ", Secondary: %"
                   PRId64 "\n",
                   env->dmmu.mmu_primary_context,
                   env->dmmu.mmu_secondary_context);
    if ((env->lsu & DMMU_E) == 0) {
        (*cpu_fprintf)(f, "DMMU disabled\n");
    } else {
        (*cpu_fprintf)(f, "DMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->dtlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->dtlb[i].tte)) {
                (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
                               ", %s, %s, %s, %s, ctx %" PRId64 " %s\n",
                               i,
                               env->dtlb[i].tag & (uint64_t)~0x1fffULL,
                               TTE_PA(env->dtlb[i].tte),
                               mask,
                               TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
                               TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
                               TTE_IS_LOCKED(env->dtlb[i].tte) ?
                               "locked" : "unlocked",
                               env->dtlb[i].tag & (uint64_t)0x1fffULL,
                               TTE_IS_GLOBAL(env->dtlb[i].tte) ?
                               "global" : "local");
            }
        }
    }
    if ((env->lsu & IMMU_E) == 0) {
        (*cpu_fprintf)(f, "IMMU disabled\n");
    } else {
        (*cpu_fprintf)(f, "IMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->itlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->itlb[i].tte)) {
                (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
                               ", %s, %s, %s, ctx %" PRId64 " %s\n",
                               i,
                               env->itlb[i].tag & (uint64_t)~0x1fffULL,
                               TTE_PA(env->itlb[i].tte),
                               mask,
                               TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
                               TTE_IS_LOCKED(env->itlb[i].tte) ?
                               "locked" : "unlocked",
                               env->itlb[i].tag & (uint64_t)0x1fffULL,
                               TTE_IS_GLOBAL(env->itlb[i].tte) ?
                               "global" : "local");
            }
        }
    }
}

#endif /* TARGET_SPARC64 */
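
/* Translation helpers used by the debugger and other probes: translate an
   address without entering the result into the TLB. */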
static int cpu_sparc_get_phys_page(CPUState *env, target_phys_addr_t *phys,
                                   target_ulong addr, int rw, int mmu_idx)
{
    target_ulong page_size;
    int prot, access_index;

    return get_physical_address(env, phys, &prot, &access_index, addr, rw,
                                mmu_idx, &page_size);
}

#if defined(TARGET_SPARC64)
target_phys_addr_t cpu_get_phys_page_nofault(CPUState *env, target_ulong addr,
                                             int mmu_idx)
{
    target_phys_addr_t phys_addr;

    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) {
        return -1;
    }
    return phys_addr;
}
#endif

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_phys_addr_t phys_addr;
    int mmu_idx = cpu_mmu_index(env);

    /* Try an instruction fetch first, then fall back to a data load. */
    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
        if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
            return -1;
        }
    }
    if (cpu_get_physical_page_desc(phys_addr) == IO_MEM_UNASSIGNED) {
        return -1;
    }
    return phys_addr;
}
#endif