target/sparc/ldst_helper.c (qemu/ar7.git)
1 /*
2 * Helpers for loads and stores
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "tcg.h"
23 #include "exec/helper-proto.h"
24 #include "exec/exec-all.h"
25 #include "exec/cpu_ldst.h"
26 #include "asi.h"
28 //#define DEBUG_MMU
29 //#define DEBUG_MXCC
30 //#define DEBUG_UNALIGNED
31 //#define DEBUG_UNASSIGNED
32 //#define DEBUG_ASI
33 //#define DEBUG_CACHE_CONTROL
35 #ifdef DEBUG_MMU
36 #define DPRINTF_MMU(fmt, ...) \
37 do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
38 #else
39 #define DPRINTF_MMU(fmt, ...) do {} while (0)
40 #endif
42 #ifdef DEBUG_MXCC
43 #define DPRINTF_MXCC(fmt, ...) \
44 do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
45 #else
46 #define DPRINTF_MXCC(fmt, ...) do {} while (0)
47 #endif
49 #ifdef DEBUG_ASI
50 #define DPRINTF_ASI(fmt, ...) \
51 do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
52 #endif
54 #ifdef DEBUG_CACHE_CONTROL
55 #define DPRINTF_CACHE_CONTROL(fmt, ...) \
56 do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
57 #else
58 #define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
59 #endif
61 #ifdef TARGET_SPARC64
62 #ifndef TARGET_ABI32
63 #define AM_CHECK(env1) ((env1)->pstate & PS_AM)
64 #else
65 #define AM_CHECK(env1) (1)
66 #endif
67 #endif
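/* Note: PS_AM is the PSTATE address-mask bit; when it is set, address_mask()
   below truncates virtual addresses to 32 bits.  Under TARGET_ABI32 (a 32-bit
   user-mode ABI on a 64-bit CPU) the check is hard-wired to 1, so translated
   addresses are always masked. */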
69 #define QT0 (env->qt0)
70 #define QT1 (env->qt1)
72 #if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
73 /* Calculates TSB pointer value for fault page size
74 * UltraSPARC IIi has fixed sizes (8k or 64k) for the page pointers
75 * UA2005 holds the page size configuration in mmu_ctx registers */
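/* Illustrative sun4u example (derived from the arithmetic below, not from an
   external spec): with tsb_size = 0, no split and the 8k pointer (idx = 0),
   tsb_base_mask is ~0x1fff and va = tag_access >> 9, so the result is the TSB
   base with VA bits 21:13 placed at bits 12:4, i.e. an index into 512 TTEs of
   16 bytes each (an 8KB TSB), as expected for tsb_size = 0. */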
76 static uint64_t ultrasparc_tsb_pointer(CPUSPARCState *env,
77 const SparcV9MMU *mmu, const int idx)
79 uint64_t tsb_register;
80 int page_size;
81 if (cpu_has_hypervisor(env)) {
82 int tsb_index = 0;
83 int ctx = mmu->tag_access & 0x1fffULL;
84 uint64_t ctx_register = mmu->sun4v_ctx_config[ctx ? 1 : 0];
85 tsb_index = idx;
86 tsb_index |= ctx ? 2 : 0;
87 page_size = idx ? ctx_register >> 8 : ctx_register;
88 page_size &= 7;
89 tsb_register = mmu->sun4v_tsb_pointers[tsb_index];
90 } else {
91 page_size = idx;
92 tsb_register = mmu->tsb;
94 int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
95 int tsb_size = tsb_register & 0xf;
97 uint64_t tsb_base_mask = (~0x1fffULL) << tsb_size;
99 /* move va bits to correct position,
100 * the context bits will be masked out later */
101 uint64_t va = mmu->tag_access >> (3 * page_size + 9);
103 /* calculate tsb_base mask and adjust va if split is in use */
104 if (tsb_split) {
105 if (idx == 0) {
106 va &= ~(1ULL << (13 + tsb_size));
107 } else {
108 va |= (1ULL << (13 + tsb_size));
110 tsb_base_mask <<= 1;
113 return ((tsb_register & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
116 /* Calculates tag target register value by reordering bits
117 in tag access register */
118 static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
120 return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
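/* i.e., assuming the usual tag-access layout (VA[63:13] in bits 63:13,
   context in bits 12:0), the 13-bit context lands in bits 60:48 of the tag
   target and VA[63:22] lands in bits 41:0. */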
123 static void replace_tlb_entry(SparcTLBEntry *tlb,
124 uint64_t tlb_tag, uint64_t tlb_tte,
125 CPUSPARCState *env1)
127 target_ulong mask, size, va, offset;
129 /* flush page range if translation is valid */
130 if (TTE_IS_VALID(tlb->tte)) {
131 CPUState *cs = CPU(sparc_env_get_cpu(env1));
133 size = 8192ULL << 3 * TTE_PGSIZE(tlb->tte);
134 mask = 1ULL + ~size;
136 va = tlb->tag & mask;
138 for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
139 tlb_flush_page(cs, va + offset);
143 tlb->tag = tlb_tag;
144 tlb->tte = tlb_tte;
147 static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
148 const char *strmmu, CPUSPARCState *env1)
150 unsigned int i;
151 target_ulong mask;
152 uint64_t context;
154 int is_demap_context = (demap_addr >> 6) & 1;
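/* Decode of the demap address as used below (a summary of this code, not of
   an external document): bit 6 selects "demap context" (1) vs "demap page"
   (0); bits 5:4 select the context to compare against: 0 = primary,
   1 = secondary, 2 = nucleus (context 0), 3 = reserved (demap ignored). */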
156 /* demap context */
157 switch ((demap_addr >> 4) & 3) {
158 case 0: /* primary */
159 context = env1->dmmu.mmu_primary_context;
160 break;
161 case 1: /* secondary */
162 context = env1->dmmu.mmu_secondary_context;
163 break;
164 case 2: /* nucleus */
165 context = 0;
166 break;
167 case 3: /* reserved */
168 default:
169 return;
172 for (i = 0; i < 64; i++) {
173 if (TTE_IS_VALID(tlb[i].tte)) {
175 if (is_demap_context) {
176 /* will remove non-global entries matching context value */
177 if (TTE_IS_GLOBAL(tlb[i].tte) ||
178 !tlb_compare_context(&tlb[i], context)) {
179 continue;
181 } else {
182 /* demap page
183 will remove any entry matching VA */
184 mask = 0xffffffffffffe000ULL;
185 mask <<= 3 * ((tlb[i].tte >> 61) & 3);
187 if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
188 continue;
191 /* entry should be global or matching context value */
192 if (!TTE_IS_GLOBAL(tlb[i].tte) &&
193 !tlb_compare_context(&tlb[i], context)) {
194 continue;
198 replace_tlb_entry(&tlb[i], 0, 0, env1);
199 #ifdef DEBUG_MMU
200 DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
201 dump_mmu(stdout, fprintf, env1);
202 #endif
207 static uint64_t sun4v_tte_to_sun4u(CPUSPARCState *env, uint64_t tag,
208 uint64_t sun4v_tte)
210 uint64_t sun4u_tte;
211 if (!(cpu_has_hypervisor(env) && (tag & TLB_UST1_IS_SUN4V_BIT))) {
212 /* is already in the sun4u format */
213 return sun4v_tte;
215 sun4u_tte = TTE_PA(sun4v_tte) | (sun4v_tte & TTE_VALID_BIT);
216 sun4u_tte |= (sun4v_tte & 3ULL) << 61; /* TTE_PGSIZE */
217 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_NFO_BIT_UA2005, TTE_NFO_BIT);
218 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_USED_BIT_UA2005, TTE_USED_BIT);
219 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_W_OK_BIT_UA2005, TTE_W_OK_BIT);
220 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_SIDEEFFECT_BIT_UA2005,
221 TTE_SIDEEFFECT_BIT);
222 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_PRIV_BIT_UA2005, TTE_PRIV_BIT);
223 sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_LOCKED_BIT_UA2005, TTE_LOCKED_BIT);
224 return sun4u_tte;
227 static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
228 uint64_t tlb_tag, uint64_t tlb_tte,
229 const char *strmmu, CPUSPARCState *env1,
230 uint64_t addr)
232 unsigned int i, replace_used;
234 tlb_tte = sun4v_tte_to_sun4u(env1, addr, tlb_tte);
235 if (cpu_has_hypervisor(env1)) {
236 uint64_t new_vaddr = tlb_tag & ~0x1fffULL;
237 uint64_t new_size = 8192ULL << 3 * TTE_PGSIZE(tlb_tte);
238 uint32_t new_ctx = tlb_tag & 0x1fffU;
239 for (i = 0; i < 64; i++) {
240 uint32_t ctx = tlb[i].tag & 0x1fffU;
241 /* check if new mapping overlaps an existing one */
242 if (new_ctx == ctx) {
243 uint64_t vaddr = tlb[i].tag & ~0x1fffULL;
244 uint64_t size = 8192ULL << 3 * TTE_PGSIZE(tlb[i].tte);
245 if (new_vaddr == vaddr
246 || (new_vaddr < vaddr + size
247 && vaddr < new_vaddr + new_size)) {
248 DPRINTF_MMU("auto demap entry [%d] %lx->%lx\n", i, vaddr,
249 new_vaddr);
250 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
251 return;
257 /* Try replacing invalid entry */
258 for (i = 0; i < 64; i++) {
259 if (!TTE_IS_VALID(tlb[i].tte)) {
260 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
261 #ifdef DEBUG_MMU
262 DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
263 dump_mmu(stdout, fprintf, env1);
264 #endif
265 return;
269 /* All entries are valid, try replacing unlocked entry */
271 for (replace_used = 0; replace_used < 2; ++replace_used) {
273 /* Used entries are not replaced on first pass */
275 for (i = 0; i < 64; i++) {
276 if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {
278 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
279 #ifdef DEBUG_MMU
280 DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
281 strmmu, (replace_used ? "used" : "unused"), i);
282 dump_mmu(stdout, fprintf, env1);
283 #endif
284 return;
288 /* Now reset used bit and search for unused entries again */
290 for (i = 0; i < 64; i++) {
291 TTE_SET_UNUSED(tlb[i].tte);
295 #ifdef DEBUG_MMU
296 DPRINTF_MMU("%s lru replacement: no free entries available, "
297 "replacing the last one\n", strmmu);
298 #endif
299 /* corner case: the last entry is replaced anyway */
300 replace_tlb_entry(&tlb[63], tlb_tag, tlb_tte, env1);
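/* Summary of the replacement order implemented above: (1) on sun4v, an
   existing entry whose mapping overlaps the new one is replaced in place
   ("auto demap"); (2) otherwise the first invalid entry is taken; (3) then
   the first unlocked, unused entry; (4) then, after clearing all used bits,
   the first unlocked entry; (5) finally entry 63 is overwritten. */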
303 #endif
305 #ifdef TARGET_SPARC64
306 /* returns true if an access using this ASI has its address translated by the MMU;
307    otherwise the access goes to the raw physical address */
308 /* TODO: check sparc32 bits */
309 static inline int is_translating_asi(int asi)
311 /* Ultrasparc IIi translating asi
312 - note this list is defined by cpu implementation
314 switch (asi) {
315 case 0x04 ... 0x11:
316 case 0x16 ... 0x19:
317 case 0x1E ... 0x1F:
318 case 0x24 ... 0x2C:
319 case 0x70 ... 0x73:
320 case 0x78 ... 0x79:
321 case 0x80 ... 0xFF:
322 return 1;
324 default:
325 return 0;
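/* Only the ranges listed above are treated as virtually addressed; any other
   ASI is assumed to carry a physical (bypass) address, so asi_address_mask()
   below leaves it untouched and no PSTATE.AM truncation is applied. */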
329 static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr)
331 if (AM_CHECK(env1)) {
332 addr &= 0xffffffffULL;
334 return addr;
337 static inline target_ulong asi_address_mask(CPUSPARCState *env,
338 int asi, target_ulong addr)
340 if (is_translating_asi(asi)) {
341 addr = address_mask(env, addr);
343 return addr;
346 #ifndef CONFIG_USER_ONLY
347 static inline void do_check_asi(CPUSPARCState *env, int asi, uintptr_t ra)
349 /* ASIs >= 0x80 are user mode.
350 * ASIs >= 0x30 are hyper mode (or super if hyper is not available).
351 * ASIs <= 0x2f are super mode.
353 if (asi < 0x80
354 && !cpu_hypervisor_mode(env)
355 && (!cpu_supervisor_mode(env)
356 || (asi >= 0x30 && cpu_has_hypervisor(env)))) {
357 cpu_raise_exception_ra(env, TT_PRIV_ACT, ra);
360 #endif /* !CONFIG_USER_ONLY */
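/* Worked example of do_check_asi(), derived from the check itself: in user
   mode an access with ASI 0x04 raises TT_PRIV_ACT while ASI 0x80 and above is
   allowed; in supervisor mode ASIs up to 0x2f are allowed, but 0x30..0x7f
   also trap when the CPU has a hypervisor, since they are hyperprivileged
   there. */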
361 #endif
363 static void do_check_align(CPUSPARCState *env, target_ulong addr,
364 uint32_t align, uintptr_t ra)
366 if (addr & align) {
367 #ifdef DEBUG_UNALIGNED
368 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
369 "\n", addr, env->pc);
370 #endif
371 cpu_raise_exception_ra(env, TT_UNALIGNED, ra);
375 void helper_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align)
377 do_check_align(env, addr, align, GETPC());
380 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
381 defined(DEBUG_MXCC)
382 static void dump_mxcc(CPUSPARCState *env)
384 printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
385 "\n",
386 env->mxccdata[0], env->mxccdata[1],
387 env->mxccdata[2], env->mxccdata[3]);
388 printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
389 "\n"
390 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
391 "\n",
392 env->mxccregs[0], env->mxccregs[1],
393 env->mxccregs[2], env->mxccregs[3],
394 env->mxccregs[4], env->mxccregs[5],
395 env->mxccregs[6], env->mxccregs[7]);
397 #endif
399 #if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
400 && defined(DEBUG_ASI)
401 static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
402 uint64_t r1)
404 switch (size) {
405 case 1:
406 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
407 addr, asi, r1 & 0xff);
408 break;
409 case 2:
410 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
411 addr, asi, r1 & 0xffff);
412 break;
413 case 4:
414 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
415 addr, asi, r1 & 0xffffffff);
416 break;
417 case 8:
418 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
419 addr, asi, r1);
420 break;
423 #endif
425 #ifndef TARGET_SPARC64
426 #ifndef CONFIG_USER_ONLY
429 /* Leon3 cache control */
431 static void leon3_cache_control_st(CPUSPARCState *env, target_ulong addr,
432 uint64_t val, int size)
434 DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
435 addr, val, size);
437 if (size != 4) {
438 DPRINTF_CACHE_CONTROL("32bits only\n");
439 return;
442 switch (addr) {
443 case 0x00: /* Cache control */
445 /* These values must always be read as zeros */
446 val &= ~CACHE_CTRL_FD;
447 val &= ~CACHE_CTRL_FI;
448 val &= ~CACHE_CTRL_IB;
449 val &= ~CACHE_CTRL_IP;
450 val &= ~CACHE_CTRL_DP;
452 env->cache_control = val;
453 break;
454 case 0x04: /* Instruction cache configuration */
455 case 0x08: /* Data cache configuration */
456 /* Read Only */
457 break;
458 default:
459 DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
460 break;
464 static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr,
465 int size)
467 uint64_t ret = 0;
469 if (size != 4) {
470 DPRINTF_CACHE_CONTROL("32bits only\n");
471 return 0;
474 switch (addr) {
475 case 0x00: /* Cache control */
476 ret = env->cache_control;
477 break;
479 /* Configuration registers are read-only and always keep their
480    predefined values */
482 case 0x04: /* Instruction cache configuration */
483 ret = 0x10220000;
484 break;
485 case 0x08: /* Data cache configuration */
486 ret = 0x18220000;
487 break;
488 default:
489 DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
490 break;
492 DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
493 addr, ret, size);
494 return ret;
497 uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
498 int asi, uint32_t memop)
500 int size = 1 << (memop & MO_SIZE);
501 int sign = memop & MO_SIGN;
502 CPUState *cs = CPU(sparc_env_get_cpu(env));
503 uint64_t ret = 0;
504 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
505 uint32_t last_addr = addr;
506 #endif
508 do_check_align(env, addr, size - 1, GETPC());
509 switch (asi) {
510 case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
511 /* case ASI_LEON_CACHEREGS: Leon3 cache control */
512 switch (addr) {
513 case 0x00: /* Leon3 Cache Control */
514 case 0x08: /* Leon3 Instruction Cache config */
515 case 0x0C: /* Leon3 Data Cache config */
516 if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
517 ret = leon3_cache_control_ld(env, addr, size);
519 break;
520 case 0x01c00a00: /* MXCC control register */
521 if (size == 8) {
522 ret = env->mxccregs[3];
523 } else {
524 qemu_log_mask(LOG_UNIMP,
525 "%08x: unimplemented access size: %d\n", addr,
526 size);
528 break;
529 case 0x01c00a04: /* MXCC control register */
530 if (size == 4) {
531 ret = env->mxccregs[3];
532 } else {
533 qemu_log_mask(LOG_UNIMP,
534 "%08x: unimplemented access size: %d\n", addr,
535 size);
537 break;
538 case 0x01c00c00: /* Module reset register */
539 if (size == 8) {
540 ret = env->mxccregs[5];
541 /* should we do something here? */
542 } else {
543 qemu_log_mask(LOG_UNIMP,
544 "%08x: unimplemented access size: %d\n", addr,
545 size);
547 break;
548 case 0x01c00f00: /* MBus port address register */
549 if (size == 8) {
550 ret = env->mxccregs[7];
551 } else {
552 qemu_log_mask(LOG_UNIMP,
553 "%08x: unimplemented access size: %d\n", addr,
554 size);
556 break;
557 default:
558 qemu_log_mask(LOG_UNIMP,
559 "%08x: unimplemented address, size: %d\n", addr,
560 size);
561 break;
563 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
564 "addr = %08x -> ret = %" PRIx64 ","
565 "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
566 #ifdef DEBUG_MXCC
567 dump_mxcc(env);
568 #endif
569 break;
570 case ASI_M_FLUSH_PROBE: /* SuperSparc MMU probe */
571 case ASI_LEON_MMUFLUSH: /* LEON3 MMU probe */
573 int mmulev;
575 mmulev = (addr >> 8) & 15;
576 if (mmulev > 4) {
577 ret = 0;
578 } else {
579 ret = mmu_probe(env, addr, mmulev);
581 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
582 addr, mmulev, ret);
584 break;
585 case ASI_M_MMUREGS: /* SuperSparc MMU regs */
586 case ASI_LEON_MMUREGS: /* LEON3 MMU regs */
588 int reg = (addr >> 8) & 0x1f;
590 ret = env->mmuregs[reg];
591 if (reg == 3) { /* Fault status cleared on read */
592 env->mmuregs[3] = 0;
593 } else if (reg == 0x13) { /* Fault status read */
594 ret = env->mmuregs[3];
595 } else if (reg == 0x14) { /* Fault address read */
596 ret = env->mmuregs[4];
598 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
600 break;
601 case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
602 case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */
603 case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
604 break;
605 case ASI_KERNELTXT: /* Supervisor code access */
606 switch (size) {
607 case 1:
608 ret = cpu_ldub_code(env, addr);
609 break;
610 case 2:
611 ret = cpu_lduw_code(env, addr);
612 break;
613 default:
614 case 4:
615 ret = cpu_ldl_code(env, addr);
616 break;
617 case 8:
618 ret = cpu_ldq_code(env, addr);
619 break;
621 break;
622 case ASI_M_TXTC_TAG: /* SparcStation 5 I-cache tag */
623 case ASI_M_TXTC_DATA: /* SparcStation 5 I-cache data */
624 case ASI_M_DATAC_TAG: /* SparcStation 5 D-cache tag */
625 case ASI_M_DATAC_DATA: /* SparcStation 5 D-cache data */
626 break;
627 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
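/* The low nibble of the ASI number supplies physical-address bits 35:32
   below, which is how the 0x100000000..0xfffffffff range mentioned above is
   reached. */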
628 switch (size) {
629 case 1:
630 ret = ldub_phys(cs->as, (hwaddr)addr
631 | ((hwaddr)(asi & 0xf) << 32));
632 break;
633 case 2:
634 ret = lduw_phys(cs->as, (hwaddr)addr
635 | ((hwaddr)(asi & 0xf) << 32));
636 break;
637 default:
638 case 4:
639 ret = ldl_phys(cs->as, (hwaddr)addr
640 | ((hwaddr)(asi & 0xf) << 32));
641 break;
642 case 8:
643 ret = ldq_phys(cs->as, (hwaddr)addr
644 | ((hwaddr)(asi & 0xf) << 32));
645 break;
647 break;
648 case 0x30: /* Turbosparc secondary cache diagnostic */
649 case 0x31: /* Turbosparc RAM snoop */
650 case 0x32: /* Turbosparc page table descriptor diagnostic */
651 case 0x39: /* data cache diagnostic register */
652 ret = 0;
653 break;
654 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
656 int reg = (addr >> 8) & 3;
658 switch (reg) {
659 case 0: /* Breakpoint Value (Addr) */
660 ret = env->mmubpregs[reg];
661 break;
662 case 1: /* Breakpoint Mask */
663 ret = env->mmubpregs[reg];
664 break;
665 case 2: /* Breakpoint Control */
666 ret = env->mmubpregs[reg];
667 break;
668 case 3: /* Breakpoint Status */
669 ret = env->mmubpregs[reg];
670 env->mmubpregs[reg] = 0ULL;
671 break;
673 DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
674 ret);
676 break;
677 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
678 ret = env->mmubpctrv;
679 break;
680 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
681 ret = env->mmubpctrc;
682 break;
683 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
684 ret = env->mmubpctrs;
685 break;
686 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
687 ret = env->mmubpaction;
688 break;
689 case ASI_USERTXT: /* User code access, XXX */
690 default:
691 cpu_unassigned_access(cs, addr, false, false, asi, size);
692 ret = 0;
693 break;
695 case ASI_USERDATA: /* User data access */
696 case ASI_KERNELDATA: /* Supervisor data access */
697 case ASI_P: /* Implicit primary context data access (v9 only?) */
698 case ASI_M_BYPASS: /* MMU passthrough */
699 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
700 /* These are always handled inline. */
701 g_assert_not_reached();
703 if (sign) {
704 switch (size) {
705 case 1:
706 ret = (int8_t) ret;
707 break;
708 case 2:
709 ret = (int16_t) ret;
710 break;
711 case 4:
712 ret = (int32_t) ret;
713 break;
714 default:
715 break;
718 #ifdef DEBUG_ASI
719 dump_asi("read ", last_addr, asi, size, ret);
720 #endif
721 return ret;
724 void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
725 int asi, uint32_t memop)
727 int size = 1 << (memop & MO_SIZE);
728 SPARCCPU *cpu = sparc_env_get_cpu(env);
729 CPUState *cs = CPU(cpu);
731 do_check_align(env, addr, size - 1, GETPC());
732 switch (asi) {
733 case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
734 /* case ASI_LEON_CACHEREGS: Leon3 cache control */
735 switch (addr) {
736 case 0x00: /* Leon3 Cache Control */
737 case 0x08: /* Leon3 Instruction Cache config */
738 case 0x0C: /* Leon3 Data Cache config */
739 if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
740 leon3_cache_control_st(env, addr, val, size);
742 break;
744 case 0x01c00000: /* MXCC stream data register 0 */
745 if (size == 8) {
746 env->mxccdata[0] = val;
747 } else {
748 qemu_log_mask(LOG_UNIMP,
749 "%08x: unimplemented access size: %d\n", addr,
750 size);
752 break;
753 case 0x01c00008: /* MXCC stream data register 1 */
754 if (size == 8) {
755 env->mxccdata[1] = val;
756 } else {
757 qemu_log_mask(LOG_UNIMP,
758 "%08x: unimplemented access size: %d\n", addr,
759 size);
761 break;
762 case 0x01c00010: /* MXCC stream data register 2 */
763 if (size == 8) {
764 env->mxccdata[2] = val;
765 } else {
766 qemu_log_mask(LOG_UNIMP,
767 "%08x: unimplemented access size: %d\n", addr,
768 size);
770 break;
771 case 0x01c00018: /* MXCC stream data register 3 */
772 if (size == 8) {
773 env->mxccdata[3] = val;
774 } else {
775 qemu_log_mask(LOG_UNIMP,
776 "%08x: unimplemented access size: %d\n", addr,
777 size);
779 break;
780 case 0x01c00100: /* MXCC stream source */
781 if (size == 8) {
782 env->mxccregs[0] = val;
783 } else {
784 qemu_log_mask(LOG_UNIMP,
785 "%08x: unimplemented access size: %d\n", addr,
786 size);
788 env->mxccdata[0] = ldq_phys(cs->as,
789 (env->mxccregs[0] & 0xffffffffULL) +
791 env->mxccdata[1] = ldq_phys(cs->as,
792 (env->mxccregs[0] & 0xffffffffULL) +
794 env->mxccdata[2] = ldq_phys(cs->as,
795 (env->mxccregs[0] & 0xffffffffULL) +
796 16);
797 env->mxccdata[3] = ldq_phys(cs->as,
798 (env->mxccregs[0] & 0xffffffffULL) +
799 24);
800 break;
801 case 0x01c00200: /* MXCC stream destination */
802 if (size == 8) {
803 env->mxccregs[1] = val;
804 } else {
805 qemu_log_mask(LOG_UNIMP,
806 "%08x: unimplemented access size: %d\n", addr,
807 size);
809 stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 0,
810 env->mxccdata[0]);
811 stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 8,
812 env->mxccdata[1]);
813 stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 16,
814 env->mxccdata[2]);
815 stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 24,
816 env->mxccdata[3]);
817 break;
818 case 0x01c00a00: /* MXCC control register */
819 if (size == 8) {
820 env->mxccregs[3] = val;
821 } else {
822 qemu_log_mask(LOG_UNIMP,
823 "%08x: unimplemented access size: %d\n", addr,
824 size);
826 break;
827 case 0x01c00a04: /* MXCC control register */
828 if (size == 4) {
829 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
830 | val;
831 } else {
832 qemu_log_mask(LOG_UNIMP,
833 "%08x: unimplemented access size: %d\n", addr,
834 size);
836 break;
837 case 0x01c00e00: /* MXCC error register */
838 /* writing a 1 bit clears the error */
839 if (size == 8) {
840 env->mxccregs[6] &= ~val;
841 } else {
842 qemu_log_mask(LOG_UNIMP,
843 "%08x: unimplemented access size: %d\n", addr,
844 size);
846 break;
847 case 0x01c00f00: /* MBus port address register */
848 if (size == 8) {
849 env->mxccregs[7] = val;
850 } else {
851 qemu_log_mask(LOG_UNIMP,
852 "%08x: unimplemented access size: %d\n", addr,
853 size);
855 break;
856 default:
857 qemu_log_mask(LOG_UNIMP,
858 "%08x: unimplemented address, size: %d\n", addr,
859 size);
860 break;
862 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
863 asi, size, addr, val);
864 #ifdef DEBUG_MXCC
865 dump_mxcc(env);
866 #endif
867 break;
868 case ASI_M_FLUSH_PROBE: /* SuperSparc MMU flush */
869 case ASI_LEON_MMUFLUSH: /* LEON3 MMU flush */
871 int mmulev;
873 mmulev = (addr >> 8) & 15;
874 DPRINTF_MMU("mmu flush level %d\n", mmulev);
875 switch (mmulev) {
876 case 0: /* flush page */
877 tlb_flush_page(CPU(cpu), addr & 0xfffff000);
878 break;
879 case 1: /* flush segment (256k) */
880 case 2: /* flush region (16M) */
881 case 3: /* flush context (4G) */
882 case 4: /* flush entire */
883 tlb_flush(CPU(cpu));
884 break;
885 default:
886 break;
888 #ifdef DEBUG_MMU
889 dump_mmu(stdout, fprintf, env);
890 #endif
892 break;
893 case ASI_M_MMUREGS: /* write MMU regs */
894 case ASI_LEON_MMUREGS: /* LEON3 write MMU regs */
896 int reg = (addr >> 8) & 0x1f;
897 uint32_t oldreg;
899 oldreg = env->mmuregs[reg];
900 switch (reg) {
901 case 0: /* Control Register */
902 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
903 (val & 0x00ffffff);
904 /* Mappings generated during no-fault mode
905 are invalid in normal mode. */
906 if ((oldreg ^ env->mmuregs[reg])
907 & (MMU_NF | env->def.mmu_bm)) {
908 tlb_flush(CPU(cpu));
910 break;
911 case 1: /* Context Table Pointer Register */
912 env->mmuregs[reg] = val & env->def.mmu_ctpr_mask;
913 break;
914 case 2: /* Context Register */
915 env->mmuregs[reg] = val & env->def.mmu_cxr_mask;
916 if (oldreg != env->mmuregs[reg]) {
917 /* we flush when the MMU context changes because
918 QEMU has no MMU context support */
919 tlb_flush(CPU(cpu));
921 break;
922 case 3: /* Synchronous Fault Status Register with Clear */
923 case 4: /* Synchronous Fault Address Register */
924 break;
925 case 0x10: /* TLB Replacement Control Register */
926 env->mmuregs[reg] = val & env->def.mmu_trcr_mask;
927 break;
928 case 0x13: /* Synchronous Fault Status Register with Read
929 and Clear */
930 env->mmuregs[3] = val & env->def.mmu_sfsr_mask;
931 break;
932 case 0x14: /* Synchronous Fault Address Register */
933 env->mmuregs[4] = val;
934 break;
935 default:
936 env->mmuregs[reg] = val;
937 break;
939 if (oldreg != env->mmuregs[reg]) {
940 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
941 reg, oldreg, env->mmuregs[reg]);
943 #ifdef DEBUG_MMU
944 dump_mmu(stdout, fprintf, env);
945 #endif
947 break;
948 case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
949 case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */
950 case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
951 break;
952 case ASI_M_TXTC_TAG: /* I-cache tag */
953 case ASI_M_TXTC_DATA: /* I-cache data */
954 case ASI_M_DATAC_TAG: /* D-cache tag */
955 case ASI_M_DATAC_DATA: /* D-cache data */
956 case ASI_M_FLUSH_PAGE: /* I/D-cache flush page */
957 case ASI_M_FLUSH_SEG: /* I/D-cache flush segment */
958 case ASI_M_FLUSH_REGION: /* I/D-cache flush region */
959 case ASI_M_FLUSH_CTX: /* I/D-cache flush context */
960 case ASI_M_FLUSH_USER: /* I/D-cache flush user */
961 break;
962 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
964 switch (size) {
965 case 1:
966 stb_phys(cs->as, (hwaddr)addr
967 | ((hwaddr)(asi & 0xf) << 32), val);
968 break;
969 case 2:
970 stw_phys(cs->as, (hwaddr)addr
971 | ((hwaddr)(asi & 0xf) << 32), val);
972 break;
973 case 4:
974 default:
975 stl_phys(cs->as, (hwaddr)addr
976 | ((hwaddr)(asi & 0xf) << 32), val);
977 break;
978 case 8:
979 stq_phys(cs->as, (hwaddr)addr
980 | ((hwaddr)(asi & 0xf) << 32), val);
981 break;
984 break;
985 case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */
986 case 0x31: /* store buffer data, Ross RT620 I-cache flush or
987 Turbosparc snoop RAM */
988 case 0x32: /* store buffer control or Turbosparc page table
989 descriptor diagnostic */
990 case 0x36: /* I-cache flash clear */
991 case 0x37: /* D-cache flash clear */
992 break;
993 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
995 int reg = (addr >> 8) & 3;
997 switch (reg) {
998 case 0: /* Breakpoint Value (Addr) */
999 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1000 break;
1001 case 1: /* Breakpoint Mask */
1002 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1003 break;
1004 case 2: /* Breakpoint Control */
1005 env->mmubpregs[reg] = (val & 0x7fULL);
1006 break;
1007 case 3: /* Breakpoint Status */
1008 env->mmubpregs[reg] = (val & 0xfULL);
1009 break;
1011 DPRINTF_MMU("write breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
1012 env->mmubpregs[reg]);
1014 break;
1015 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
1016 env->mmubpctrv = val & 0xffffffff;
1017 break;
1018 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
1019 env->mmubpctrc = val & 0x3;
1020 break;
1021 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
1022 env->mmubpctrs = val & 0x3;
1023 break;
1024 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
1025 env->mmubpaction = val & 0x1fff;
1026 break;
1027 case ASI_USERTXT: /* User code access, XXX */
1028 case ASI_KERNELTXT: /* Supervisor code access, XXX */
1029 default:
1030 cpu_unassigned_access(CPU(sparc_env_get_cpu(env)),
1031 addr, true, false, asi, size);
1032 break;
1034 case ASI_USERDATA: /* User data access */
1035 case ASI_KERNELDATA: /* Supervisor data access */
1036 case ASI_P:
1037 case ASI_M_BYPASS: /* MMU passthrough */
1038 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1039 case ASI_M_BCOPY: /* Block copy, sta access */
1040 case ASI_M_BFILL: /* Block fill, stda access */
1041 /* These are always handled inline. */
1042 g_assert_not_reached();
1044 #ifdef DEBUG_ASI
1045 dump_asi("write", addr, asi, size, val);
1046 #endif
1049 #endif /* CONFIG_USER_ONLY */
1050 #else /* TARGET_SPARC64 */
1052 #ifdef CONFIG_USER_ONLY
1053 uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
1054 int asi, uint32_t memop)
1056 int size = 1 << (memop & MO_SIZE);
1057 int sign = memop & MO_SIGN;
1058 uint64_t ret = 0;
1060 if (asi < 0x80) {
1061 cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
1063 do_check_align(env, addr, size - 1, GETPC());
1064 addr = asi_address_mask(env, asi, addr);
1066 switch (asi) {
1067 case ASI_PNF: /* Primary no-fault */
1068 case ASI_PNFL: /* Primary no-fault LE */
1069 case ASI_SNF: /* Secondary no-fault */
1070 case ASI_SNFL: /* Secondary no-fault LE */
1071 if (page_check_range(addr, size, PAGE_READ) == -1) {
1072 ret = 0;
1073 break;
1075 switch (size) {
1076 case 1:
1077 ret = cpu_ldub_data(env, addr);
1078 break;
1079 case 2:
1080 ret = cpu_lduw_data(env, addr);
1081 break;
1082 case 4:
1083 ret = cpu_ldl_data(env, addr);
1084 break;
1085 case 8:
1086 ret = cpu_ldq_data(env, addr);
1087 break;
1088 default:
1089 g_assert_not_reached();
1091 break;
1094 case ASI_P: /* Primary */
1095 case ASI_PL: /* Primary LE */
1096 case ASI_S: /* Secondary */
1097 case ASI_SL: /* Secondary LE */
1098 /* These are always handled inline. */
1099 g_assert_not_reached();
1101 default:
1102 cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
1105 /* Convert from little endian */
1106 switch (asi) {
1107 case ASI_PNFL: /* Primary no-fault LE */
1108 case ASI_SNFL: /* Secondary no-fault LE */
1109 switch (size) {
1110 case 2:
1111 ret = bswap16(ret);
1112 break;
1113 case 4:
1114 ret = bswap32(ret);
1115 break;
1116 case 8:
1117 ret = bswap64(ret);
1118 break;
1122 /* Convert to signed number */
1123 if (sign) {
1124 switch (size) {
1125 case 1:
1126 ret = (int8_t) ret;
1127 break;
1128 case 2:
1129 ret = (int16_t) ret;
1130 break;
1131 case 4:
1132 ret = (int32_t) ret;
1133 break;
1136 #ifdef DEBUG_ASI
1137 dump_asi("read", addr, asi, size, ret);
1138 #endif
1139 return ret;
1142 void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
1143 int asi, uint32_t memop)
1145 int size = 1 << (memop & MO_SIZE);
1146 #ifdef DEBUG_ASI
1147 dump_asi("write", addr, asi, size, val);
1148 #endif
1149 if (asi < 0x80) {
1150 cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
1152 do_check_align(env, addr, size - 1, GETPC());
1154 switch (asi) {
1155 case ASI_P: /* Primary */
1156 case ASI_PL: /* Primary LE */
1157 case ASI_S: /* Secondary */
1158 case ASI_SL: /* Secondary LE */
1159 /* These are always handled inline. */
1160 g_assert_not_reached();
1162 case ASI_PNF: /* Primary no-fault, RO */
1163 case ASI_SNF: /* Secondary no-fault, RO */
1164 case ASI_PNFL: /* Primary no-fault LE, RO */
1165 case ASI_SNFL: /* Secondary no-fault LE, RO */
1166 default:
1167 cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
1171 #else /* CONFIG_USER_ONLY */
1173 uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
1174 int asi, uint32_t memop)
1176 int size = 1 << (memop & MO_SIZE);
1177 int sign = memop & MO_SIGN;
1178 CPUState *cs = CPU(sparc_env_get_cpu(env));
1179 uint64_t ret = 0;
1180 #if defined(DEBUG_ASI)
1181 target_ulong last_addr = addr;
1182 #endif
1184 asi &= 0xff;
1186 do_check_asi(env, asi, GETPC());
1187 do_check_align(env, addr, size - 1, GETPC());
1188 addr = asi_address_mask(env, asi, addr);
1190 switch (asi) {
1191 case ASI_PNF:
1192 case ASI_PNFL:
1193 case ASI_SNF:
1194 case ASI_SNFL:
1196 TCGMemOpIdx oi;
1197 int idx = (env->pstate & PS_PRIV
1198 ? (asi & 1 ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX)
1199 : (asi & 1 ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX));
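/* These bit tests rely on the V9 no-fault ASI encodings (nominally 0x82/0x83
   and their little-endian variants 0x8a/0x8b): bit 0 selects the secondary
   context and bit 3 (the "asi & 8" tests below) selects little-endian. */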
1201 if (cpu_get_phys_page_nofault(env, addr, idx) == -1ULL) {
1202 #ifdef DEBUG_ASI
1203 dump_asi("read ", last_addr, asi, size, ret);
1204 #endif
1205 /* exception_index is set in get_physical_address_data. */
1206 cpu_raise_exception_ra(env, cs->exception_index, GETPC());
1208 oi = make_memop_idx(memop, idx);
1209 switch (size) {
1210 case 1:
1211 ret = helper_ret_ldub_mmu(env, addr, oi, GETPC());
1212 break;
1213 case 2:
1214 if (asi & 8) {
1215 ret = helper_le_lduw_mmu(env, addr, oi, GETPC());
1216 } else {
1217 ret = helper_be_lduw_mmu(env, addr, oi, GETPC());
1219 break;
1220 case 4:
1221 if (asi & 8) {
1222 ret = helper_le_ldul_mmu(env, addr, oi, GETPC());
1223 } else {
1224 ret = helper_be_ldul_mmu(env, addr, oi, GETPC());
1226 break;
1227 case 8:
1228 if (asi & 8) {
1229 ret = helper_le_ldq_mmu(env, addr, oi, GETPC());
1230 } else {
1231 ret = helper_be_ldq_mmu(env, addr, oi, GETPC());
1233 break;
1234 default:
1235 g_assert_not_reached();
1238 break;
1240 case ASI_AIUP: /* As if user primary */
1241 case ASI_AIUS: /* As if user secondary */
1242 case ASI_AIUPL: /* As if user primary LE */
1243 case ASI_AIUSL: /* As if user secondary LE */
1244 case ASI_P: /* Primary */
1245 case ASI_S: /* Secondary */
1246 case ASI_PL: /* Primary LE */
1247 case ASI_SL: /* Secondary LE */
1248 case ASI_REAL: /* Bypass */
1249 case ASI_REAL_IO: /* Bypass, non-cacheable */
1250 case ASI_REAL_L: /* Bypass LE */
1251 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1252 case ASI_N: /* Nucleus */
1253 case ASI_NL: /* Nucleus Little Endian (LE) */
1254 case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */
1255 case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
1256 case ASI_TWINX_AIUP: /* As if user primary, twinx */
1257 case ASI_TWINX_AIUS: /* As if user secondary, twinx */
1258 case ASI_TWINX_REAL: /* Real address, twinx */
1259 case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
1260 case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
1261 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1262 case ASI_TWINX_N: /* Nucleus, twinx */
1263 case ASI_TWINX_NL: /* Nucleus, twinx, LE */
1264 /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
1265 case ASI_TWINX_P: /* Primary, twinx */
1266 case ASI_TWINX_PL: /* Primary, twinx, LE */
1267 case ASI_TWINX_S: /* Secondary, twinx */
1268 case ASI_TWINX_SL: /* Secondary, twinx, LE */
1269 /* These are always handled inline. */
1270 g_assert_not_reached();
1272 case ASI_UPA_CONFIG: /* UPA config */
1273 /* XXX */
1274 break;
1275 case ASI_LSU_CONTROL: /* LSU */
1276 ret = env->lsu;
1277 break;
1278 case ASI_IMMU: /* I-MMU regs */
1280 int reg = (addr >> 3) & 0xf;
1281 switch (reg) {
1282 case 0:
1283 /* 0x00 I-TSB Tag Target register */
1284 ret = ultrasparc_tag_target(env->immu.tag_access);
1285 break;
1286 case 3: /* SFSR */
1287 ret = env->immu.sfsr;
1288 break;
1289 case 5: /* TSB access */
1290 ret = env->immu.tsb;
1291 break;
1292 case 6:
1293 /* 0x30 I-TSB Tag Access register */
1294 ret = env->immu.tag_access;
1295 break;
1296 default:
1297 cpu_unassigned_access(cs, addr, false, false, 1, size);
1298 ret = 0;
1300 break;
1302 case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer */
1304 /* env->immuregs[5] holds I-MMU TSB register value
1305 env->immuregs[6] holds I-MMU Tag Access register value */
1306 ret = ultrasparc_tsb_pointer(env, &env->immu, 0);
1307 break;
1309 case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer */
1311 /* env->immuregs[5] holds I-MMU TSB register value
1312 env->immuregs[6] holds I-MMU Tag Access register value */
1313 ret = ultrasparc_tsb_pointer(env, &env->immu, 1);
1314 break;
1316 case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
1318 int reg = (addr >> 3) & 0x3f;
1320 ret = env->itlb[reg].tte;
1321 break;
1323 case ASI_ITLB_TAG_READ: /* I-MMU tag read */
1325 int reg = (addr >> 3) & 0x3f;
1327 ret = env->itlb[reg].tag;
1328 break;
1330 case ASI_DMMU: /* D-MMU regs */
1332 int reg = (addr >> 3) & 0xf;
1333 switch (reg) {
1334 case 0:
1335 /* 0x00 D-TSB Tag Target register */
1336 ret = ultrasparc_tag_target(env->dmmu.tag_access);
1337 break;
1338 case 1: /* 0x08 Primary Context */
1339 ret = env->dmmu.mmu_primary_context;
1340 break;
1341 case 2: /* 0x10 Secondary Context */
1342 ret = env->dmmu.mmu_secondary_context;
1343 break;
1344 case 3: /* SFSR */
1345 ret = env->dmmu.sfsr;
1346 break;
1347 case 4: /* 0x20 SFAR */
1348 ret = env->dmmu.sfar;
1349 break;
1350 case 5: /* 0x28 TSB access */
1351 ret = env->dmmu.tsb;
1352 break;
1353 case 6: /* 0x30 D-TSB Tag Access register */
1354 ret = env->dmmu.tag_access;
1355 break;
1356 case 7:
1357 ret = env->dmmu.virtual_watchpoint;
1358 break;
1359 case 8:
1360 ret = env->dmmu.physical_watchpoint;
1361 break;
1362 default:
1363 cpu_unassigned_access(cs, addr, false, false, 1, size);
1364 ret = 0;
1366 break;
1368 case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer */
1370 /* env->dmmuregs[5] holds D-MMU TSB register value
1371 env->dmmuregs[6] holds D-MMU Tag Access register value */
1372 ret = ultrasparc_tsb_pointer(env, &env->dmmu, 0);
1373 break;
1375 case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer */
1377 /* env->dmmuregs[5] holds D-MMU TSB register value
1378 env->dmmuregs[6] holds D-MMU Tag Access register value */
1379 ret = ultrasparc_tsb_pointer(env, &env->dmmu, 1);
1380 break;
1382 case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
1384 int reg = (addr >> 3) & 0x3f;
1386 ret = env->dtlb[reg].tte;
1387 break;
1389 case ASI_DTLB_TAG_READ: /* D-MMU tag read */
1391 int reg = (addr >> 3) & 0x3f;
1393 ret = env->dtlb[reg].tag;
1394 break;
1396 case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
1397 break;
1398 case ASI_INTR_RECEIVE: /* Interrupt data receive */
1399 ret = env->ivec_status;
1400 break;
1401 case ASI_INTR_R: /* Incoming interrupt vector, RO */
1403 int reg = (addr >> 4) & 0x3;
1404 if (reg < 3) {
1405 ret = env->ivec_data[reg];
1407 break;
1409 case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */
1410 if (unlikely((addr >= 0x20) && (addr < 0x30))) {
1411 /* Hyperprivileged access only */
1412 cpu_unassigned_access(cs, addr, false, false, 1, size);
1414 /* fall through */
1415 case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */
1417 unsigned int i = (addr >> 3) & 0x7;
1418 ret = env->scratch[i];
1419 break;
1421 case ASI_MMU: /* UA2005 Context ID registers */
1422 switch ((addr >> 3) & 0x3) {
1423 case 1:
1424 ret = env->dmmu.mmu_primary_context;
1425 break;
1426 case 2:
1427 ret = env->dmmu.mmu_secondary_context;
1428 break;
1429 default:
1430 cpu_unassigned_access(cs, addr, true, false, 1, size);
1432 break;
1433 case ASI_DCACHE_DATA: /* D-cache data */
1434 case ASI_DCACHE_TAG: /* D-cache tag access */
1435 case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
1436 case ASI_AFSR: /* E-cache asynchronous fault status */
1437 case ASI_AFAR: /* E-cache asynchronous fault address */
1438 case ASI_EC_TAG_DATA: /* E-cache tag data */
1439 case ASI_IC_INSTR: /* I-cache instruction access */
1440 case ASI_IC_TAG: /* I-cache tag access */
1441 case ASI_IC_PRE_DECODE: /* I-cache predecode */
1442 case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
1443 case ASI_EC_W: /* E-cache tag */
1444 case ASI_EC_R: /* E-cache tag */
1445 break;
1446 case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer */
1447 case ASI_ITLB_DATA_IN: /* I-MMU data in, WO */
1448 case ASI_IMMU_DEMAP: /* I-MMU demap, WO */
1449 case ASI_DTLB_DATA_IN: /* D-MMU data in, WO */
1450 case ASI_DMMU_DEMAP: /* D-MMU demap, WO */
1451 case ASI_INTR_W: /* Interrupt vector, WO */
1452 default:
1453 cpu_unassigned_access(cs, addr, false, false, 1, size);
1454 ret = 0;
1455 break;
1458 /* Convert to signed number */
1459 if (sign) {
1460 switch (size) {
1461 case 1:
1462 ret = (int8_t) ret;
1463 break;
1464 case 2:
1465 ret = (int16_t) ret;
1466 break;
1467 case 4:
1468 ret = (int32_t) ret;
1469 break;
1470 default:
1471 break;
1474 #ifdef DEBUG_ASI
1475 dump_asi("read ", last_addr, asi, size, ret);
1476 #endif
1477 return ret;
1480 void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
1481 int asi, uint32_t memop)
1483 int size = 1 << (memop & MO_SIZE);
1484 SPARCCPU *cpu = sparc_env_get_cpu(env);
1485 CPUState *cs = CPU(cpu);
1487 #ifdef DEBUG_ASI
1488 dump_asi("write", addr, asi, size, val);
1489 #endif
1491 asi &= 0xff;
1493 do_check_asi(env, asi, GETPC());
1494 do_check_align(env, addr, size - 1, GETPC());
1495 addr = asi_address_mask(env, asi, addr);
1497 switch (asi) {
1498 case ASI_AIUP: /* As if user primary */
1499 case ASI_AIUS: /* As if user secondary */
1500 case ASI_AIUPL: /* As if user primary LE */
1501 case ASI_AIUSL: /* As if user secondary LE */
1502 case ASI_P: /* Primary */
1503 case ASI_S: /* Secondary */
1504 case ASI_PL: /* Primary LE */
1505 case ASI_SL: /* Secondary LE */
1506 case ASI_REAL: /* Bypass */
1507 case ASI_REAL_IO: /* Bypass, non-cacheable */
1508 case ASI_REAL_L: /* Bypass LE */
1509 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1510 case ASI_N: /* Nucleus */
1511 case ASI_NL: /* Nucleus Little Endian (LE) */
1512 case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */
1513 case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
1514 case ASI_TWINX_AIUP: /* As if user primary, twinx */
1515 case ASI_TWINX_AIUS: /* As if user secondary, twinx */
1516 case ASI_TWINX_REAL: /* Real address, twinx */
1517 case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
1518 case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
1519 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1520 case ASI_TWINX_N: /* Nucleus, twinx */
1521 case ASI_TWINX_NL: /* Nucleus, twinx, LE */
1522 /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
1523 case ASI_TWINX_P: /* Primary, twinx */
1524 case ASI_TWINX_PL: /* Primary, twinx, LE */
1525 case ASI_TWINX_S: /* Secondary, twinx */
1526 case ASI_TWINX_SL: /* Secondary, twinx, LE */
1527 /* These are always handled inline. */
1528 g_assert_not_reached();
1529 /* these ASIs have different functions on UltraSPARC-IIIi
1530 * and UA2005 CPUs. Use the explicit numbers to avoid confusion
1532 case 0x31:
1533 case 0x32:
1534 case 0x39:
1535 case 0x3a:
1536 if (cpu_has_hypervisor(env)) {
1537 /* UA2005
1538 * ASI_DMMU_CTX_ZERO_TSB_BASE_PS0
1539 * ASI_DMMU_CTX_ZERO_TSB_BASE_PS1
1540 * ASI_DMMU_CTX_NONZERO_TSB_BASE_PS0
1541 * ASI_DMMU_CTX_NONZERO_TSB_BASE_PS1
1543 int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2);
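/* Maps ASI 0x31/0x32/0x39/0x3a to idx 0/1/2/3: bit 0 of idx is the page-size
   select (PS0/PS1) and bit 1 is zero vs. non-zero context, matching how
   ultrasparc_tsb_pointer() indexes sun4v_tsb_pointers[]. */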
1544 env->dmmu.sun4v_tsb_pointers[idx] = val;
1545 } else {
1546 helper_raise_exception(env, TT_ILL_INSN);
1548 break;
1549 case 0x33:
1550 case 0x3b:
1551 if (cpu_has_hypervisor(env)) {
1552 /* UA2005
1553 * ASI_DMMU_CTX_ZERO_CONFIG
1554 * ASI_DMMU_CTX_NONZERO_CONFIG
1556 env->dmmu.sun4v_ctx_config[(asi & 8) >> 3] = val;
1557 } else {
1558 helper_raise_exception(env, TT_ILL_INSN);
1560 break;
1561 case 0x35:
1562 case 0x36:
1563 case 0x3d:
1564 case 0x3e:
1565 if (cpu_has_hypervisor(env)) {
1566 /* UA2005
1567 * ASI_IMMU_CTX_ZERO_TSB_BASE_PS0
1568 * ASI_IMMU_CTX_ZERO_TSB_BASE_PS1
1569 * ASI_IMMU_CTX_NONZERO_TSB_BASE_PS0
1570 * ASI_IMMU_CTX_NONZERO_TSB_BASE_PS1
1572 int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2);
1573 env->immu.sun4v_tsb_pointers[idx] = val;
1574 } else {
1575 helper_raise_exception(env, TT_ILL_INSN);
1577 break;
1578 case 0x37:
1579 case 0x3f:
1580 if (cpu_has_hypervisor(env)) {
1581 /* UA2005
1582 * ASI_IMMU_CTX_ZERO_CONFIG
1583 * ASI_IMMU_CTX_NONZERO_CONFIG
1585 env->immu.sun4v_ctx_config[(asi & 8) >> 3] = val;
1586 } else {
1587 helper_raise_exception(env, TT_ILL_INSN);
1589 break;
1590 case ASI_UPA_CONFIG: /* UPA config */
1591 /* XXX */
1592 return;
1593 case ASI_LSU_CONTROL: /* LSU */
1594 env->lsu = val & (DMMU_E | IMMU_E);
1595 return;
1596 case ASI_IMMU: /* I-MMU regs */
1598 int reg = (addr >> 3) & 0xf;
1599 uint64_t oldreg;
1601 oldreg = env->immu.mmuregs[reg];
1602 switch (reg) {
1603 case 0: /* RO */
1604 return;
1605 case 1: /* Not in I-MMU */
1606 case 2:
1607 return;
1608 case 3: /* SFSR */
1609 if ((val & 1) == 0) {
1610 val = 0; /* Clear SFSR */
1612 env->immu.sfsr = val;
1613 break;
1614 case 4: /* RO */
1615 return;
1616 case 5: /* TSB access */
1617 DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
1618 PRIx64 "\n", env->immu.tsb, val);
1619 env->immu.tsb = val;
1620 break;
1621 case 6: /* Tag access */
1622 env->immu.tag_access = val;
1623 break;
1624 case 7:
1625 case 8:
1626 return;
1627 default:
1628 cpu_unassigned_access(cs, addr, true, false, 1, size);
1629 break;
1632 if (oldreg != env->immu.mmuregs[reg]) {
1633 DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
1634 PRIx64 "\n", reg, oldreg, env->immu.mmuregs[reg]);
1636 #ifdef DEBUG_MMU
1637 dump_mmu(stdout, fprintf, env);
1638 #endif
1639 return;
1641 case ASI_ITLB_DATA_IN: /* I-MMU data in */
1642 /* ignore real translation entries */
1643 if (!(addr & TLB_UST1_IS_REAL_BIT)) {
1644 replace_tlb_1bit_lru(env->itlb, env->immu.tag_access,
1645 val, "immu", env, addr);
1647 return;
1648 case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
1650 /* TODO: auto demap */
1652 unsigned int i = (addr >> 3) & 0x3f;
1654 /* ignore real translation entries */
1655 if (!(addr & TLB_UST1_IS_REAL_BIT)) {
1656 replace_tlb_entry(&env->itlb[i], env->immu.tag_access,
1657 sun4v_tte_to_sun4u(env, addr, val), env);
1659 #ifdef DEBUG_MMU
1660 DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
1661 dump_mmu(stdout, fprintf, env);
1662 #endif
1663 return;
1665 case ASI_IMMU_DEMAP: /* I-MMU demap */
1666 demap_tlb(env->itlb, addr, "immu", env);
1667 return;
1668 case ASI_DMMU: /* D-MMU regs */
1670 int reg = (addr >> 3) & 0xf;
1671 uint64_t oldreg;
1673 oldreg = env->dmmu.mmuregs[reg];
1674 switch (reg) {
1675 case 0: /* RO */
1676 case 4:
1677 return;
1678 case 3: /* SFSR */
1679 if ((val & 1) == 0) {
1680 val = 0; /* Clear SFSR, Fault address */
1681 env->dmmu.sfar = 0;
1683 env->dmmu.sfsr = val;
1684 break;
1685 case 1: /* Primary context */
1686 env->dmmu.mmu_primary_context = val;
1687 /* can be optimized to only flush MMU_USER_IDX
1688 and MMU_KERNEL_IDX entries */
1689 tlb_flush(CPU(cpu));
1690 break;
1691 case 2: /* Secondary context */
1692 env->dmmu.mmu_secondary_context = val;
1693 /* can be optimized to only flush MMU_USER_SECONDARY_IDX
1694 and MMU_KERNEL_SECONDARY_IDX entries */
1695 tlb_flush(CPU(cpu));
1696 break;
1697 case 5: /* TSB access */
1698 DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
1699 PRIx64 "\n", env->dmmu.tsb, val);
1700 env->dmmu.tsb = val;
1701 break;
1702 case 6: /* Tag access */
1703 env->dmmu.tag_access = val;
1704 break;
1705 case 7: /* Virtual Watchpoint */
1706 env->dmmu.virtual_watchpoint = val;
1707 break;
1708 case 8: /* Physical Watchpoint */
1709 env->dmmu.physical_watchpoint = val;
1710 break;
1711 default:
1712 cpu_unassigned_access(cs, addr, true, false, 1, size);
1713 break;
1716 if (oldreg != env->dmmu.mmuregs[reg]) {
1717 DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
1718 PRIx64 "\n", reg, oldreg, env->dmmu.mmuregs[reg]);
1720 #ifdef DEBUG_MMU
1721 dump_mmu(stdout, fprintf, env);
1722 #endif
1723 return;
1725 case ASI_DTLB_DATA_IN: /* D-MMU data in */
1726 /* ignore real translation entries */
1727 if (!(addr & TLB_UST1_IS_REAL_BIT)) {
1728 replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access,
1729 val, "dmmu", env, addr);
1731 return;
1732 case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
1734 unsigned int i = (addr >> 3) & 0x3f;
1736 /* ignore real translation entries */
1737 if (!(addr & TLB_UST1_IS_REAL_BIT)) {
1738 replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access,
1739 sun4v_tte_to_sun4u(env, addr, val), env);
1741 #ifdef DEBUG_MMU
1742 DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
1743 dump_mmu(stdout, fprintf, env);
1744 #endif
1745 return;
1747 case ASI_DMMU_DEMAP: /* D-MMU demap */
1748 demap_tlb(env->dtlb, addr, "dmmu", env);
1749 return;
1750 case ASI_INTR_RECEIVE: /* Interrupt data receive */
1751 env->ivec_status = val & 0x20;
1752 return;
1753 case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */
1754 if (unlikely((addr >= 0x20) && (addr < 0x30))) {
1755 /* Hyperprivileged access only */
1756 cpu_unassigned_access(cs, addr, true, false, 1, size);
1758 /* fall through */
1759 case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */
1761 unsigned int i = (addr >> 3) & 0x7;
1762 env->scratch[i] = val;
1763 return;
1765 case ASI_MMU: /* UA2005 Context ID registers */
1767 switch ((addr >> 3) & 0x3) {
1768 case 1:
1769 env->dmmu.mmu_primary_context = val;
1770 env->immu.mmu_primary_context = val;
1771 tlb_flush_by_mmuidx(CPU(cpu),
1772 (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX));
1773 break;
1774 case 2:
1775 env->dmmu.mmu_secondary_context = val;
1776 env->immu.mmu_secondary_context = val;
1777 tlb_flush_by_mmuidx(CPU(cpu),
1778 (1 << MMU_USER_SECONDARY_IDX) |
1779 (1 << MMU_KERNEL_SECONDARY_IDX));
1780 break;
1781 default:
1782 cpu_unassigned_access(cs, addr, true, false, 1, size);
1785 return;
1786 case ASI_QUEUE: /* UA2005 CPU mondo queue */
1787 case ASI_DCACHE_DATA: /* D-cache data */
1788 case ASI_DCACHE_TAG: /* D-cache tag access */
1789 case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
1790 case ASI_AFSR: /* E-cache asynchronous fault status */
1791 case ASI_AFAR: /* E-cache asynchronous fault address */
1792 case ASI_EC_TAG_DATA: /* E-cache tag data */
1793 case ASI_IC_INSTR: /* I-cache instruction access */
1794 case ASI_IC_TAG: /* I-cache tag access */
1795 case ASI_IC_PRE_DECODE: /* I-cache predecode */
1796 case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
1797 case ASI_EC_W: /* E-cache tag */
1798 case ASI_EC_R: /* E-cache tag */
1799 return;
1800 case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer, RO */
1801 case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer, RO */
1802 case ASI_ITLB_TAG_READ: /* I-MMU tag read, RO */
1803 case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer, RO */
1804 case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer, RO */
1805 case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer, RO */
1806 case ASI_DTLB_TAG_READ: /* D-MMU tag read, RO */
1807 case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
1808 case ASI_INTR_R: /* Incoming interrupt vector, RO */
1809 case ASI_PNF: /* Primary no-fault, RO */
1810 case ASI_SNF: /* Secondary no-fault, RO */
1811 case ASI_PNFL: /* Primary no-fault LE, RO */
1812 case ASI_SNFL: /* Secondary no-fault LE, RO */
1813 default:
1814 cpu_unassigned_access(cs, addr, true, false, 1, size);
1815 return;
1818 #endif /* CONFIG_USER_ONLY */
1819 #endif /* TARGET_SPARC64 */
1821 #if !defined(CONFIG_USER_ONLY)
1822 #ifndef TARGET_SPARC64
1823 void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
1824 bool is_write, bool is_exec, int is_asi,
1825 unsigned size)
1827 SPARCCPU *cpu = SPARC_CPU(cs);
1828 CPUSPARCState *env = &cpu->env;
1829 int fault_type;
1831 #ifdef DEBUG_UNASSIGNED
1832 if (is_asi) {
1833 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
1834 " asi 0x%02x from " TARGET_FMT_lx "\n",
1835 is_exec ? "exec" : is_write ? "write" : "read", size,
1836 size == 1 ? "" : "s", addr, is_asi, env->pc);
1837 } else {
1838 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
1839 " from " TARGET_FMT_lx "\n",
1840 is_exec ? "exec" : is_write ? "write" : "read", size,
1841 size == 1 ? "" : "s", addr, env->pc);
1843 #endif
1844 /* Don't overwrite translation and access faults */
1845 fault_type = (env->mmuregs[3] & 0x1c) >> 2;
1846 if ((fault_type > 4) || (fault_type == 0)) {
1847 env->mmuregs[3] = 0; /* Fault status register */
1848 if (is_asi) {
1849 env->mmuregs[3] |= 1 << 16;
1851 if (env->psrs) {
1852 env->mmuregs[3] |= 1 << 5;
1854 if (is_exec) {
1855 env->mmuregs[3] |= 1 << 6;
1857 if (is_write) {
1858 env->mmuregs[3] |= 1 << 7;
1860 env->mmuregs[3] |= (5 << 2) | 2;
1861 /* SuperSPARC will never place instruction fault addresses in the FAR */
1862 if (!is_exec) {
1863 env->mmuregs[4] = addr; /* Fault address register */
1866 /* overflow (same type fault was not read before another fault) */
1867 if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) {
1868 env->mmuregs[3] |= 1;
1871 if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
1872 int tt = is_exec ? TT_CODE_ACCESS : TT_DATA_ACCESS;
1873 cpu_raise_exception_ra(env, tt, GETPC());
1876 /* flush neverland mappings created during no-fault mode,
1877    so that subsequent MMU faults report the proper fault types */
1878 if (env->mmuregs[0] & MMU_NF) {
1879 tlb_flush(cs);
1882 #else
1883 void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
1884 bool is_write, bool is_exec, int is_asi,
1885 unsigned size)
1887 SPARCCPU *cpu = SPARC_CPU(cs);
1888 CPUSPARCState *env = &cpu->env;
1890 #ifdef DEBUG_UNASSIGNED
1891 printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
1892 "\n", addr, env->pc);
1893 #endif
1895 if (is_exec) { /* XXX has_hypervisor */
1896 if (env->lsu & (IMMU_E)) {
1897 cpu_raise_exception_ra(env, TT_CODE_ACCESS, GETPC());
1898 } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) {
1899 cpu_raise_exception_ra(env, TT_INSN_REAL_TRANSLATION_MISS, GETPC());
1901 } else {
1902 if (env->lsu & (DMMU_E)) {
1903 cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
1904 } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) {
1905 cpu_raise_exception_ra(env, TT_DATA_REAL_TRANSLATION_MISS, GETPC());
1909 #endif
1910 #endif
1912 #if !defined(CONFIG_USER_ONLY)
1913 void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1914 MMUAccessType access_type,
1915 int mmu_idx,
1916 uintptr_t retaddr)
1918 SPARCCPU *cpu = SPARC_CPU(cs);
1919 CPUSPARCState *env = &cpu->env;
1921 #ifdef DEBUG_UNALIGNED
1922 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
1923 "\n", addr, env->pc);
1924 #endif
1925 cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
1928 /* try to fill the TLB and return an exception if error. If retaddr is
1929 NULL, it means that the function was called in C code (i.e. not
1930 from generated code or from helper.c) */
1931 /* XXX: fix it to restore all registers */
1932 void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
1933 int mmu_idx, uintptr_t retaddr)
1935 int ret;
1937 ret = sparc_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
1938 if (ret) {
1939 cpu_loop_exit_restore(cs, retaddr);
1942 #endif