target/sparc: Prefer fast cpu_env() over slower CPU QOM cast macro
[qemu/kevin.git] / target/sparc/mmu_helper.c
/*
 * Sparc MMU helpers
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/qemu-print.h"
#include "trace.h"

/* Sparc MMU emulation */

#ifndef TARGET_SPARC64
/*
 * Sparc V8 Reference MMU (SRMMU)
 */
static const int access_table[8][8] = {
    { 0, 0, 0, 0, 8, 0, 12, 12 },
    { 0, 0, 0, 0, 8, 0, 0, 0 },
    { 8, 8, 0, 0, 0, 8, 12, 12 },
    { 8, 8, 0, 0, 0, 8, 0, 0 },
    { 8, 0, 8, 0, 8, 8, 12, 12 },
    { 8, 0, 8, 0, 8, 0, 8, 0 },
    { 8, 8, 8, 0, 8, 8, 12, 12 },
    { 8, 8, 8, 0, 8, 8, 8, 0 }
};
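
/*
 * access_table[access_index][acc] yields an SRMMU fault code already
 * shifted into the FSR.FT position: 0 means the access is allowed,
 * 8 is a protection error and 12 a privilege violation.
 * perm_table[is_user][acc] yields the page protection bits granted for
 * each ACC value; row 0 is used for supervisor accesses, row 1 for
 * user accesses.
 */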
static const int perm_table[2][8] = {
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC
    },
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ,
        0,
        0,
    }
};

static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full,
                                int *access_index, target_ulong address,
                                int rw, int mmu_idx)
{
    int access_perms = 0;
    hwaddr pde_ptr;
    uint32_t pde;
    int error_code = 0, is_dirty, is_user;
    unsigned long page_offset;
    CPUState *cs = env_cpu(env);
    MemTxResult result;

    is_user = mmu_idx == MMU_USER_IDX;

    if (mmu_idx == MMU_PHYS_IDX) {
        full->lg_page_size = TARGET_PAGE_BITS;
        /* Boot mode: instruction fetches are taken from PROM */
        if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) {
            full->phys_addr = env->prom_addr | (address & 0x7ffffULL);
            full->prot = PAGE_READ | PAGE_EXEC;
            return 0;
        }
        full->phys_addr = address;
        full->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1);
    full->phys_addr = 0xffffffffffff0000ULL;
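
    /*
     * From here on, a non-zero return value encodes an SRMMU fault in the
     * same layout as the fault status register: bits 9:8 hold the table
     * level (L) of the fault and bits 4:2 the fault type (FT).  The
     * caller merges this value into mmuregs[3].
     */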
    /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
    /* Context base + context number */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        return 4 << 2; /* Translation fault, L = 0 */
    }

    /* Ctx pde */
    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
        return 1 << 2;
    case 2: /* L0 PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 4 << 2;
    case 1: /* L0 PDE */
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = address_space_ldl(cs->as, pde_ptr,
                                MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            return (1 << 8) | (4 << 2); /* Translation fault, L = 1 */
        }

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
            return (1 << 8) | (1 << 2);
        case 3: /* Reserved */
            return (1 << 8) | (4 << 2);
        case 1: /* L1 PDE */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = address_space_ldl(cs->as, pde_ptr,
                                    MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                return (2 << 8) | (4 << 2); /* Translation fault, L = 2 */
            }

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
                return (2 << 8) | (1 << 2);
            case 3: /* Reserved */
                return (2 << 8) | (4 << 2);
            case 1: /* L2 PDE */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = address_space_ldl(cs->as, pde_ptr,
                                        MEMTXATTRS_UNSPECIFIED, &result);
                if (result != MEMTX_OK) {
                    return (3 << 8) | (4 << 2); /* Translation fault, L = 3 */
                }

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                    return (3 << 8) | (1 << 2);
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return (3 << 8) | (4 << 2);
                case 2: /* L3 PTE */
                    page_offset = 0;
                }
                full->lg_page_size = TARGET_PAGE_BITS;
                break;
            case 2: /* L2 PTE */
                page_offset = address & 0x3f000;
                full->lg_page_size = 18;
            }
            break;
        case 2: /* L1 PTE */
            page_offset = address & 0xfff000;
            full->lg_page_size = 24;
            break;
        }
    }

    /* check access */
    access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
    error_code = access_table[*access_index][access_perms];
    if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) {
        return error_code;
    }

    /* update page modified and dirty bits */
    is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
    if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
        pde |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pde |= PG_MODIFIED_MASK;
        }
        stl_phys_notdirty(cs->as, pde_ptr, pde);
    }

    /* the page can be put in the TLB */
    full->prot = perm_table[is_user][access_perms];
    if (!(pde & PG_MODIFIED_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        full->prot &= ~PAGE_WRITE;
    }

    /* Even if large ptes, we map only one 4KB page in the cache to
       avoid filling it too fast */
    full->phys_addr = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset;
    return error_code;
}

/* Perform address translation */
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    CPUSPARCState *env = cpu_env(cs);
    CPUTLBEntryFull full = {};
    target_ulong vaddr;
    int error_code = 0, access_index;

    /*
     * TODO: If we ever need tlb_vaddr_to_host for this target,
     * then we must figure out how to manipulate FSR and FAR
     * when both MMU_NF and probe are set.  In the meantime,
     * do not support this use case.
     */
    assert(!probe);

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &full, &access_index,
                                      address, access_type, mmu_idx);
    vaddr = address;
    if (likely(error_code == 0)) {
        qemu_log_mask(CPU_LOG_MMU,
                      "Translate at %" VADDR_PRIx " -> "
                      HWADDR_FMT_plx ", vaddr " TARGET_FMT_lx "\n",
                      address, full.phys_addr, vaddr);
        tlb_set_page_full(cs, mmu_idx, vaddr, &full);
        return true;
    }

    if (env->mmuregs[3]) { /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    }
    env->mmuregs[3] |= (access_index << 5) | error_code | 2;
    env->mmuregs[4] = address; /* Fault address register */

    if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) {
        /* No fault mode: if a mapping is available, just override
           permissions. If no mapping is available, redirect accesses to
           neverland. Fake/overridden mappings will be flushed when
           switching to normal mode. */
        full.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        tlb_set_page_full(cs, mmu_idx, vaddr, &full);
        return true;
    } else {
        if (access_type == MMU_INST_FETCH) {
            cs->exception_index = TT_TFAULT;
        } else {
            cs->exception_index = TT_DFAULT;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }
}

target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
{
    CPUState *cs = env_cpu(env);
    hwaddr pde_ptr;
    uint32_t pde;
    MemTxResult result;

    /*
     * TODO: MMU probe operations are supposed to set the fault
     * status registers, but we don't do this.
     */
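
    /*
     * mmulev selects how deep the table walk goes before an entry is
     * returned: 3 stops at the context-table entry, 2 at the level-1
     * entry, 1 at the level-2 entry, and 0 walks down to the level-3 PTE.
     */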

    /* Context base + context number */
    pde_ptr = (hwaddr)(env->mmuregs[1] << 4) +
        (env->mmuregs[2] << 2);
    pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        return 0;
    }

    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
    case 2: /* PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 0;
    case 1: /* L1 PDE */
        if (mmulev == 3) {
            return pde;
        }
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = address_space_ldl(cs->as, pde_ptr,
                                MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            return 0;
        }

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
        case 3: /* Reserved */
            return 0;
        case 2: /* L1 PTE */
            return pde;
        case 1: /* L2 PDE */
            if (mmulev == 2) {
                return pde;
            }
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = address_space_ldl(cs->as, pde_ptr,
                                    MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                return 0;
            }

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
            case 3: /* Reserved */
                return 0;
            case 2: /* L2 PTE */
                return pde;
            case 1: /* L3 PDE */
                if (mmulev == 1) {
                    return pde;
                }
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = address_space_ldl(cs->as, pde_ptr,
                                        MEMTXATTRS_UNSPECIFIED, &result);
                if (result != MEMTX_OK) {
                    return 0;
                }

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 0;
                case 2: /* L3 PTE */
                    return pde;
                }
            }
        }
    }
    return 0;
}

void dump_mmu(CPUSPARCState *env)
{
    CPUState *cs = env_cpu(env);
    target_ulong va, va1, va2;
    unsigned int n, m, o;
    hwaddr pa;
    uint32_t pde;

    qemu_printf("Root ptr: " HWADDR_FMT_plx ", ctx: %d\n",
                (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]);
    for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
        pde = mmu_probe(env, va, 2);
        if (pde) {
            pa = cpu_get_phys_page_debug(cs, va);
            qemu_printf("VA: " TARGET_FMT_lx ", PA: " HWADDR_FMT_plx
                        " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
            for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
                pde = mmu_probe(env, va1, 1);
                if (pde) {
                    pa = cpu_get_phys_page_debug(cs, va1);
                    qemu_printf(" VA: " TARGET_FMT_lx ", PA: "
                                HWADDR_FMT_plx " PDE: " TARGET_FMT_lx "\n",
                                va1, pa, pde);
                    for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
                        pde = mmu_probe(env, va2, 0);
                        if (pde) {
                            pa = cpu_get_phys_page_debug(cs, va2);
                            qemu_printf("  VA: " TARGET_FMT_lx ", PA: "
                                        HWADDR_FMT_plx " PTE: "
                                        TARGET_FMT_lx "\n",
                                        va2, pa, pde);
                        }
                    }
                }
            }
        }
    }
}

/* Gdb expects all register windows to be flushed in RAM. This function handles
 * reads (and only reads) in stack frames as if windows were flushed. We assume
 * that the sparc ABI is followed.
 */
int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address,
                              uint8_t *buf, int len, bool is_write)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong addr = address;
    int i;
    int len1;
    int cwp = env->cwp;

    if (!is_write) {
        for (i = 0; i < env->nwindows; i++) {
            int off;
            target_ulong fp = env->regbase[cwp * 16 + 22];

            /* Assume fp == 0 means end of frame. */
            if (fp == 0) {
                break;
            }

            cwp = cpu_cwp_inc(env, cwp + 1);

            /* Invalid window ? */
            if (env->wim & (1 << cwp)) {
                break;
            }

            /* According to the ABI, the stack is growing downward. */
            if (addr + len < fp) {
                break;
            }

            /* Not in this frame. */
            if (addr > fp + 64) {
                continue;
            }

            /* Handle access before this window. */
            if (addr < fp) {
                len1 = fp - addr;
                if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) {
                    return -1;
                }
                addr += len1;
                len -= len1;
                buf += len1;
            }

            /* Access byte per byte to registers. Not very efficient but speed
             * is not critical.
             */
            off = addr - fp;
            len1 = 64 - off;

            if (len1 > len) {
                len1 = len;
            }

            for (; len1; len1--) {
                int reg = cwp * 16 + 8 + (off >> 2);
                union {
                    uint32_t v;
                    uint8_t c[4];
                } u;
                u.v = cpu_to_be32(env->regbase[reg]);
                *buf++ = u.c[off & 3];
                addr++;
                len--;
                off++;
            }

            if (len == 0) {
                return 0;
            }
        }
    }
    return cpu_memory_rw_debug(cs, addr, buf, len, is_write);
}

#else /* !TARGET_SPARC64 */

/* 41 bit physical address space */
static inline hwaddr ultrasparc_truncate_physical(uint64_t x)
{
    return x & 0x1ffffffffffULL;
}

/*
 * UltraSparc IIi I/DMMUs
 */

/* Returns true if the TTE tag is valid and matches the virtual address
   value in context; requires the virtual address mask value calculated
   from the TTE entry size. */
static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
                                       uint64_t address, uint64_t context,
                                       hwaddr *physical)
{
    uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte));
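    /* TTE page sizes go 8k, 64k, 512k, 4M: each step is a factor of 8,
       hence the shift by 3 * TTE_PGSIZE above. */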

    /* valid, context match, virtual address match? */
    if (TTE_IS_VALID(tlb->tte) &&
        (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
        && compare_masked(address, tlb->tag, mask)) {
        /* decode physical address */
        *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
        return 1;
    }

    return 0;
}
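
/*
 * rw values used by the translation helpers below: 0 = data load,
 * 1 = data store, 2 = instruction fetch; 4 denotes a no-fault access
 * (used by cpu_get_phys_page_nofault).
 */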

static uint64_t build_sfsr(CPUSPARCState *env, int mmu_idx, int rw)
{
    uint64_t sfsr = SFSR_VALID_BIT;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
        sfsr |= SFSR_CT_NOTRANS;
        break;
    case MMU_USER_IDX:
    case MMU_KERNEL_IDX:
        sfsr |= SFSR_CT_PRIMARY;
        break;
    case MMU_USER_SECONDARY_IDX:
    case MMU_KERNEL_SECONDARY_IDX:
        sfsr |= SFSR_CT_SECONDARY;
        break;
    case MMU_NUCLEUS_IDX:
        sfsr |= SFSR_CT_NUCLEUS;
        break;
    default:
        g_assert_not_reached();
    }

    if (rw == 1) {
        sfsr |= SFSR_WRITE_BIT;
    } else if (rw == 4) {
        sfsr |= SFSR_NF_BIT;
    }

    if (env->pstate & PS_PRIV) {
        sfsr |= SFSR_PR_BIT;
    }

    if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
        sfsr |= SFSR_OW_BIT; /* overflow (not read before another fault) */
    }

    /* FIXME: ASI field in SFSR must be set */

    return sfsr;
}

static int get_physical_address_data(CPUSPARCState *env, CPUTLBEntryFull *full,
                                     target_ulong address, int rw, int mmu_idx)
{
    CPUState *cs = env_cpu(env);
    unsigned int i;
    uint64_t sfsr;
    uint64_t context;
    bool is_user = false;

    sfsr = build_sfsr(env, mmu_idx, rw);

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        break;
    case MMU_USER_SECONDARY_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_SECONDARY_IDX:
        context = env->dmmu.mmu_secondary_context & 0x1fff;
        break;
    default:
        context = 0;
        break;
    }
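
    /* The D-TLB is modelled as 64 fully associative entries; scan them all. */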
    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->dtlb[i], address, context,
                                 &full->phys_addr)) {
            int do_fault = 0;

            if (TTE_IS_IE(env->dtlb[i].tte)) {
                full->tlb_fill_flags |= TLB_BSWAP;
            }

            /* access ok? */
            /* multiple bits in SFSR.FT may be set on TT_DFAULT */
            if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
                do_fault = 1;
                sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */
                trace_mmu_helper_dfault(address, context, mmu_idx, env->tl);
            }
            if (rw == 4) {
                if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NF_E_BIT;
                }
            } else {
                if (TTE_IS_NFO(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NFO_BIT;
                }
            }

            if (do_fault) {
                /* faults above are reported with TT_DFAULT. */
                cs->exception_index = TT_DFAULT;
            } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
                do_fault = 1;
                cs->exception_index = TT_DPROT;

                trace_mmu_helper_dprot(address, context, mmu_idx, env->tl);
            }

            if (!do_fault) {
                full->prot = PAGE_READ;
                if (TTE_IS_W_OK(env->dtlb[i].tte)) {
                    full->prot |= PAGE_WRITE;
                }

                TTE_SET_USED(env->dtlb[i].tte);

                return 0;
            }

            env->dmmu.sfsr = sfsr;
            env->dmmu.sfar = address; /* Fault address register */
            env->dmmu.tag_access = (address & ~0x1fffULL) | context;
            return 1;
        }
    }

    trace_mmu_helper_dmiss(address, context);

    /*
     * On MMU misses:
     * - UltraSPARC IIi: SFSR and SFAR unmodified
     * - JPS1: SFAR updated and some fields of SFSR updated
     */
    env->dmmu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_DMISS;
    return 1;
}

static int get_physical_address_code(CPUSPARCState *env, CPUTLBEntryFull *full,
                                     target_ulong address, int mmu_idx)
{
    CPUState *cs = env_cpu(env);
    unsigned int i;
    uint64_t context;
    bool is_user = false;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
    case MMU_USER_SECONDARY_IDX:
    case MMU_KERNEL_SECONDARY_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        break;
    default:
        context = 0;
        break;
    }

    if (env->tl == 0) {
        /* PRIMARY context */
        context = env->dmmu.mmu_primary_context & 0x1fff;
    } else {
        /* NUCLEUS context */
        context = 0;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->itlb[i],
                                 address, context, &full->phys_addr)) {
            /* access ok? */
            if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
                /* Fault status register */
                if (env->immu.sfsr & SFSR_VALID_BIT) {
                    env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
                                                     another fault) */
                } else {
                    env->immu.sfsr = 0;
                }
                if (env->pstate & PS_PRIV) {
                    env->immu.sfsr |= SFSR_PR_BIT;
                }
                if (env->tl > 0) {
                    env->immu.sfsr |= SFSR_CT_NUCLEUS;
                }

                /* FIXME: ASI field in SFSR must be set */
                env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
                cs->exception_index = TT_TFAULT;

                env->immu.tag_access = (address & ~0x1fffULL) | context;

                trace_mmu_helper_tfault(address, context);

                return 1;
            }
            full->prot = PAGE_EXEC;
            TTE_SET_USED(env->itlb[i].tte);
            return 0;
        }
    }

    trace_mmu_helper_tmiss(address, context);

    /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */
    env->immu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_TMISS;
    return 1;
}

static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full,
                                int *access_index, target_ulong address,
                                int rw, int mmu_idx)
{
    /* ??? We treat everything as a small page, then explicitly flush
       everything when an entry is evicted.  */
    full->lg_page_size = TARGET_PAGE_BITS;

    /* safety net to catch wrong softmmu index use from dynamic code */
    if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) {
        if (rw == 2) {
            trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        } else {
            trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        }
    }

    if (mmu_idx == MMU_PHYS_IDX) {
        full->phys_addr = ultrasparc_truncate_physical(address);
        full->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    if (rw == 2) {
        return get_physical_address_code(env, full, address, mmu_idx);
    } else {
        return get_physical_address_data(env, full, address, rw, mmu_idx);
    }
}

/* Perform address translation */
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    CPUSPARCState *env = cpu_env(cs);
    CPUTLBEntryFull full = {};
    int error_code = 0, access_index;

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &full, &access_index,
                                      address, access_type, mmu_idx);
    if (likely(error_code == 0)) {
        trace_mmu_helper_mmu_fault(address, full.phys_addr, mmu_idx, env->tl,
                                   env->dmmu.mmu_primary_context,
                                   env->dmmu.mmu_secondary_context);
        tlb_set_page_full(cs, mmu_idx, address, &full);
        return true;
    }
    if (probe) {
        return false;
    }
    cpu_loop_exit_restore(cs, retaddr);
}

void dump_mmu(CPUSPARCState *env)
{
    unsigned int i;
    const char *mask;

    qemu_printf("MMU contexts: Primary: %" PRId64 ", Secondary: %"
                PRId64 "\n",
                env->dmmu.mmu_primary_context,
                env->dmmu.mmu_secondary_context);
    qemu_printf("DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64
                "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target);
    if ((env->lsu & DMMU_E) == 0) {
        qemu_printf("DMMU disabled\n");
    } else {
        qemu_printf("DMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->dtlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->dtlb[i].tte)) {
                qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx"
                            ", %s, %s, %s, %s, ie %s, ctx %" PRId64 " %s\n",
                            i,
                            env->dtlb[i].tag & (uint64_t)~0x1fffULL,
                            TTE_PA(env->dtlb[i].tte),
                            mask,
                            TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
                            TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
                            TTE_IS_LOCKED(env->dtlb[i].tte) ?
                            "locked" : "unlocked",
                            TTE_IS_IE(env->dtlb[i].tte) ?
                            "yes" : "no",
                            env->dtlb[i].tag & (uint64_t)0x1fffULL,
                            TTE_IS_GLOBAL(env->dtlb[i].tte) ?
                            "global" : "local");
            }
        }
    }
    if ((env->lsu & IMMU_E) == 0) {
        qemu_printf("IMMU disabled\n");
    } else {
        qemu_printf("IMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->itlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->itlb[i].tte)) {
                qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx"
                            ", %s, %s, %s, ctx %" PRId64 " %s\n",
                            i,
                            env->itlb[i].tag & (uint64_t)~0x1fffULL,
                            TTE_PA(env->itlb[i].tte),
                            mask,
                            TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
                            TTE_IS_LOCKED(env->itlb[i].tte) ?
                            "locked" : "unlocked",
                            env->itlb[i].tag & (uint64_t)0x1fffULL,
                            TTE_IS_GLOBAL(env->itlb[i].tte) ?
                            "global" : "local");
            }
        }
    }
}

#endif /* TARGET_SPARC64 */
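
/*
 * Common helper for the debug and no-fault queries below: translate addr
 * and return 0 on success, filling *phys with the physical address.
 */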
static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys,
                                   target_ulong addr, int rw, int mmu_idx)
{
    CPUTLBEntryFull full = {};
    int access_index, ret;

    ret = get_physical_address(env, &full, &access_index, addr, rw, mmu_idx);
    if (ret == 0) {
        *phys = full.phys_addr;
    }
    return ret;
}

#if defined(TARGET_SPARC64)
hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
                                 int mmu_idx)
{
    hwaddr phys_addr;

    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) {
        return -1;
    }
    return phys_addr;
}
#endif
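
/*
 * For gdb and the monitor: try an instruction-fetch translation first and
 * fall back to a data-read translation if that fails.
 */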
hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    CPUSPARCState *env = cpu_env(cs);
    hwaddr phys_addr;
    int mmu_idx = cpu_mmu_index(cs, false);

    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
        if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
            return -1;
        }
    }
    return phys_addr;
}

G_NORETURN void sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                              MMUAccessType access_type,
                                              int mmu_idx,
                                              uintptr_t retaddr)
{
    CPUSPARCState *env = cpu_env(cs);

#ifdef TARGET_SPARC64
    env->dmmu.sfsr = build_sfsr(env, mmu_idx, access_type);
    env->dmmu.sfar = addr;
#else
    env->mmuregs[4] = addr;
#endif

    cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
}