[qemu.git] / target-sparc / ldst_helper.c
1 /*
2 * Helpers for loads and stores
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. */
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "tcg.h"
23 #include "exec/helper-proto.h"
24 #include "exec/exec-all.h"
25 #include "exec/cpu_ldst.h"
26 #include "asi.h"
28 //#define DEBUG_MMU
29 //#define DEBUG_MXCC
30 //#define DEBUG_UNALIGNED
31 //#define DEBUG_UNASSIGNED
32 //#define DEBUG_ASI
33 //#define DEBUG_CACHE_CONTROL
35 #ifdef DEBUG_MMU
36 #define DPRINTF_MMU(fmt, ...) \
37 do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
38 #else
39 #define DPRINTF_MMU(fmt, ...) do {} while (0)
40 #endif
42 #ifdef DEBUG_MXCC
43 #define DPRINTF_MXCC(fmt, ...) \
44 do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
45 #else
46 #define DPRINTF_MXCC(fmt, ...) do {} while (0)
47 #endif
49 #ifdef DEBUG_ASI
50 #define DPRINTF_ASI(fmt, ...) \
51 do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
52 #endif
54 #ifdef DEBUG_CACHE_CONTROL
55 #define DPRINTF_CACHE_CONTROL(fmt, ...) \
56 do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
57 #else
58 #define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
59 #endif
61 #ifdef TARGET_SPARC64
62 #ifndef TARGET_ABI32
63 #define AM_CHECK(env1) ((env1)->pstate & PS_AM)
64 #else
65 #define AM_CHECK(env1) (1)
66 #endif
67 #endif
69 #define QT0 (env->qt0)
70 #define QT1 (env->qt1)
72 #if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
73 /* Calculates TSB pointer value for fault page size 8k or 64k */
74 static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
75 uint64_t tag_access_register,
76 int page_size)
78 uint64_t tsb_base = tsb_register & ~0x1fffULL;
79 int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
80 int tsb_size = tsb_register & 0xf;
82 /* discard lower 13 bits which hold tag access context */
83 uint64_t tag_access_va = tag_access_register & ~0x1fffULL;
85 /* now reorder bits */
86 uint64_t tsb_base_mask = ~0x1fffULL;
87 uint64_t va = tag_access_va;
89 /* move va bits to correct position */
90 if (page_size == 8*1024) {
91 va >>= 9;
92 } else if (page_size == 64*1024) {
93 va >>= 12;
96 if (tsb_size) {
97 tsb_base_mask <<= tsb_size;
100 /* calculate tsb_base mask and adjust va if split is in use */
101 if (tsb_split) {
102 if (page_size == 8*1024) {
103 va &= ~(1ULL << (13 + tsb_size));
104 } else if (page_size == 64*1024) {
105 va |= (1ULL << (13 + tsb_size));
107 tsb_base_mask <<= 1;
110 return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
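/*
 * Added note (not in the original source): with the UltraSPARC TSB layout
 * this helper assumes, each TSB entry is 16 bytes, which is why the low
 * four bits of the returned pointer are cleared above.  For an 8k fault
 * page size, "va >>= 9" moves VA bit 13 down to bit 4, turning the page
 * number into a 16-byte-granular index into the TSB; for 64k pages the
 * shift is 12, so VA bit 16 lands at bit 4 instead.
 */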
113 /* Calculates tag target register value by reordering bits
114 in tag access register */
115 static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
117 return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
120 static void replace_tlb_entry(SparcTLBEntry *tlb,
121 uint64_t tlb_tag, uint64_t tlb_tte,
122 CPUSPARCState *env1)
124 target_ulong mask, size, va, offset;
126 /* flush page range if translation is valid */
127 if (TTE_IS_VALID(tlb->tte)) {
128 CPUState *cs = CPU(sparc_env_get_cpu(env1));
130 mask = 0xffffffffffffe000ULL;
131 mask <<= 3 * ((tlb->tte >> 61) & 3);
132 size = ~mask + 1;
134 va = tlb->tag & mask;
136 for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
137 tlb_flush_page(cs, va + offset);
141 tlb->tag = tlb_tag;
142 tlb->tte = tlb_tte;
145 static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
146 const char *strmmu, CPUSPARCState *env1)
148 unsigned int i;
149 target_ulong mask;
150 uint64_t context;
152 int is_demap_context = (demap_addr >> 6) & 1;
154 /* demap context */
155 switch ((demap_addr >> 4) & 3) {
156 case 0: /* primary */
157 context = env1->dmmu.mmu_primary_context;
158 break;
159 case 1: /* secondary */
160 context = env1->dmmu.mmu_secondary_context;
161 break;
162 case 2: /* nucleus */
163 context = 0;
164 break;
165 case 3: /* reserved */
166 default:
167 return;
170 for (i = 0; i < 64; i++) {
171 if (TTE_IS_VALID(tlb[i].tte)) {
173 if (is_demap_context) {
174 /* will remove non-global entries matching context value */
175 if (TTE_IS_GLOBAL(tlb[i].tte) ||
176 !tlb_compare_context(&tlb[i], context)) {
177 continue;
179 } else {
180 /* demap page
181 will remove any entry matching VA */
182 mask = 0xffffffffffffe000ULL;
183 mask <<= 3 * ((tlb[i].tte >> 61) & 3);
185 if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
186 continue;
189 /* entry should be global or matching context value */
190 if (!TTE_IS_GLOBAL(tlb[i].tte) &&
191 !tlb_compare_context(&tlb[i], context)) {
192 continue;
196 replace_tlb_entry(&tlb[i], 0, 0, env1);
197 #ifdef DEBUG_MMU
198 DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
199 dump_mmu(stdout, fprintf, env1);
200 #endif
205 static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
206 uint64_t tlb_tag, uint64_t tlb_tte,
207 const char *strmmu, CPUSPARCState *env1)
209 unsigned int i, replace_used;
211 /* Try replacing invalid entry */
212 for (i = 0; i < 64; i++) {
213 if (!TTE_IS_VALID(tlb[i].tte)) {
214 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
215 #ifdef DEBUG_MMU
216 DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
217 dump_mmu(stdout, fprintf, env1);
218 #endif
219 return;
223 /* All entries are valid, try replacing unlocked entry */
225 for (replace_used = 0; replace_used < 2; ++replace_used) {
227 /* Used entries are not replaced on first pass */
229 for (i = 0; i < 64; i++) {
230 if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {
232 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
233 #ifdef DEBUG_MMU
234 DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
235 strmmu, (replace_used ? "used" : "unused"), i);
236 dump_mmu(stdout, fprintf, env1);
237 #endif
238 return;
242 /* Now reset used bit and search for unused entries again */
244 for (i = 0; i < 64; i++) {
245 TTE_SET_UNUSED(tlb[i].tte);
249 #ifdef DEBUG_MMU
250 DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
251 #endif
252 /* error state? */
255 #endif
257 #if defined(TARGET_SPARC64) || defined(CONFIG_USER_ONLY)
258 static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr)
260 #ifdef TARGET_SPARC64
261 if (AM_CHECK(env1)) {
262 addr &= 0xffffffffULL;
264 #endif
265 return addr;
267 #endif
269 #ifdef TARGET_SPARC64
270 /* Returns true if an access using this ASI has its address translated by the
271 MMU; otherwise the access goes to the raw physical address */
272 /* TODO: check sparc32 bits */
273 static inline int is_translating_asi(int asi)
275 /* Ultrasparc IIi translating ASIs
276 - note this list is defined by the CPU implementation */
278 switch (asi) {
279 case 0x04 ... 0x11:
280 case 0x16 ... 0x19:
281 case 0x1E ... 0x1F:
282 case 0x24 ... 0x2C:
283 case 0x70 ... 0x73:
284 case 0x78 ... 0x79:
285 case 0x80 ... 0xFF:
286 return 1;
288 default:
289 return 0;
293 static inline target_ulong asi_address_mask(CPUSPARCState *env,
294 int asi, target_ulong addr)
296 if (is_translating_asi(asi)) {
297 return address_mask(env, addr);
298 } else {
299 return addr;
302 #endif
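/*
 * Added note: when PSTATE.AM is set (see AM_CHECK above), 64-bit virtual
 * addresses are truncated to their low 32 bits, but only for ASIs that go
 * through the MMU.  Bypass ASIs (those not listed in is_translating_asi())
 * keep the full address, which is why asi_address_mask() checks
 * is_translating_asi() before applying address_mask().
 */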
304 void helper_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align)
306 if (addr & align) {
307 #ifdef DEBUG_UNALIGNED
308 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
309 "\n", addr, env->pc);
310 #endif
311 helper_raise_exception(env, TT_UNALIGNED);
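/*
 * Usage note (added): "align" is a mask rather than a size, so callers pass
 * size - 1; e.g. helper_check_align(env, addr, 7) traps on any address that
 * is not 8-byte aligned.  The ASI helpers below all call it this way.
 */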
315 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
316 defined(DEBUG_MXCC)
317 static void dump_mxcc(CPUSPARCState *env)
319 printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
320 "\n",
321 env->mxccdata[0], env->mxccdata[1],
322 env->mxccdata[2], env->mxccdata[3]);
323 printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
324 "\n"
325 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
326 "\n",
327 env->mxccregs[0], env->mxccregs[1],
328 env->mxccregs[2], env->mxccregs[3],
329 env->mxccregs[4], env->mxccregs[5],
330 env->mxccregs[6], env->mxccregs[7]);
332 #endif
334 #if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
335 && defined(DEBUG_ASI)
336 static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
337 uint64_t r1)
339 switch (size) {
340 case 1:
341 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
342 addr, asi, r1 & 0xff);
343 break;
344 case 2:
345 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
346 addr, asi, r1 & 0xffff);
347 break;
348 case 4:
349 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
350 addr, asi, r1 & 0xffffffff);
351 break;
352 case 8:
353 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
354 addr, asi, r1);
355 break;
358 #endif
360 #ifndef TARGET_SPARC64
361 #ifndef CONFIG_USER_ONLY
364 /* Leon3 cache control */
366 static void leon3_cache_control_st(CPUSPARCState *env, target_ulong addr,
367 uint64_t val, int size)
369 DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
370 addr, val, size);
372 if (size != 4) {
373 DPRINTF_CACHE_CONTROL("32bits only\n");
374 return;
377 switch (addr) {
378 case 0x00: /* Cache control */
380 /* These values must always be read as zeros */
381 val &= ~CACHE_CTRL_FD;
382 val &= ~CACHE_CTRL_FI;
383 val &= ~CACHE_CTRL_IB;
384 val &= ~CACHE_CTRL_IP;
385 val &= ~CACHE_CTRL_DP;
387 env->cache_control = val;
388 break;
389 case 0x04: /* Instruction cache configuration */
390 case 0x08: /* Data cache configuration */
391 /* Read Only */
392 break;
393 default:
394 DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
395 break;
399 static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr,
400 int size)
402 uint64_t ret = 0;
404 if (size != 4) {
405 DPRINTF_CACHE_CONTROL("32bits only\n");
406 return 0;
409 switch (addr) {
410 case 0x00: /* Cache control */
411 ret = env->cache_control;
412 break;
414 /* Configuration registers are read-only and always keep these
415 predefined values */
417 case 0x04: /* Instruction cache configuration */
418 ret = 0x10220000;
419 break;
420 case 0x08: /* Data cache configuration */
421 ret = 0x18220000;
422 break;
423 default:
424 DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
425 break;
427 DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
428 addr, ret, size);
429 return ret;
432 uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
433 int asi, uint32_t memop)
435 int size = 1 << (memop & MO_SIZE);
436 int sign = memop & MO_SIGN;
437 CPUState *cs = CPU(sparc_env_get_cpu(env));
438 uint64_t ret = 0;
439 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
440 uint32_t last_addr = addr;
441 #endif
443 helper_check_align(env, addr, size - 1);
444 switch (asi) {
445 case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
446 /* case ASI_LEON_CACHEREGS: Leon3 cache control */
447 switch (addr) {
448 case 0x00: /* Leon3 Cache Control */
449 case 0x08: /* Leon3 Instruction Cache config */
450 case 0x0C: /* Leon3 Data Cache config */
451 if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
452 ret = leon3_cache_control_ld(env, addr, size);
454 break;
455 case 0x01c00a00: /* MXCC control register */
456 if (size == 8) {
457 ret = env->mxccregs[3];
458 } else {
459 qemu_log_mask(LOG_UNIMP,
460 "%08x: unimplemented access size: %d\n", addr,
461 size);
463 break;
464 case 0x01c00a04: /* MXCC control register */
465 if (size == 4) {
466 ret = env->mxccregs[3];
467 } else {
468 qemu_log_mask(LOG_UNIMP,
469 "%08x: unimplemented access size: %d\n", addr,
470 size);
472 break;
473 case 0x01c00c00: /* Module reset register */
474 if (size == 8) {
475 ret = env->mxccregs[5];
476 /* should we do something here? */
477 } else {
478 qemu_log_mask(LOG_UNIMP,
479 "%08x: unimplemented access size: %d\n", addr,
480 size);
482 break;
483 case 0x01c00f00: /* MBus port address register */
484 if (size == 8) {
485 ret = env->mxccregs[7];
486 } else {
487 qemu_log_mask(LOG_UNIMP,
488 "%08x: unimplemented access size: %d\n", addr,
489 size);
491 break;
492 default:
493 qemu_log_mask(LOG_UNIMP,
494 "%08x: unimplemented address, size: %d\n", addr,
495 size);
496 break;
498 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
499 "addr = %08x -> ret = %" PRIx64 ","
500 "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
501 #ifdef DEBUG_MXCC
502 dump_mxcc(env);
503 #endif
504 break;
505 case ASI_M_FLUSH_PROBE: /* SuperSparc MMU probe */
506 case ASI_LEON_MMUFLUSH: /* LEON3 MMU probe */
508 int mmulev;
510 mmulev = (addr >> 8) & 15;
511 if (mmulev > 4) {
512 ret = 0;
513 } else {
514 ret = mmu_probe(env, addr, mmulev);
516 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
517 addr, mmulev, ret);
519 break;
520 case ASI_M_MMUREGS: /* SuperSparc MMU regs */
521 case ASI_LEON_MMUREGS: /* LEON3 MMU regs */
523 int reg = (addr >> 8) & 0x1f;
525 ret = env->mmuregs[reg];
526 if (reg == 3) { /* Fault status cleared on read */
527 env->mmuregs[3] = 0;
528 } else if (reg == 0x13) { /* Fault status read */
529 ret = env->mmuregs[3];
530 } else if (reg == 0x14) { /* Fault address read */
531 ret = env->mmuregs[4];
533 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
535 break;
536 case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
537 case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */
538 case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
539 break;
540 case ASI_KERNELTXT: /* Supervisor code access */
541 switch (size) {
542 case 1:
543 ret = cpu_ldub_code(env, addr);
544 break;
545 case 2:
546 ret = cpu_lduw_code(env, addr);
547 break;
548 default:
549 case 4:
550 ret = cpu_ldl_code(env, addr);
551 break;
552 case 8:
553 ret = cpu_ldq_code(env, addr);
554 break;
556 break;
557 case ASI_USERDATA: /* User data access */
558 switch (size) {
559 case 1:
560 ret = cpu_ldub_user(env, addr);
561 break;
562 case 2:
563 ret = cpu_lduw_user(env, addr);
564 break;
565 default:
566 case 4:
567 ret = cpu_ldl_user(env, addr);
568 break;
569 case 8:
570 ret = cpu_ldq_user(env, addr);
571 break;
573 break;
574 case ASI_KERNELDATA: /* Supervisor data access */
575 case ASI_P: /* Implicit primary context data access (v9 only?) */
576 switch (size) {
577 case 1:
578 ret = cpu_ldub_kernel(env, addr);
579 break;
580 case 2:
581 ret = cpu_lduw_kernel(env, addr);
582 break;
583 default:
584 case 4:
585 ret = cpu_ldl_kernel(env, addr);
586 break;
587 case 8:
588 ret = cpu_ldq_kernel(env, addr);
589 break;
591 break;
592 case ASI_M_TXTC_TAG: /* SparcStation 5 I-cache tag */
593 case ASI_M_TXTC_DATA: /* SparcStation 5 I-cache data */
594 case ASI_M_DATAC_TAG: /* SparcStation 5 D-cache tag */
595 case ASI_M_DATAC_DATA: /* SparcStation 5 D-cache data */
596 break;
597 case ASI_M_BYPASS: /* MMU passthrough */
598 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
599 switch (size) {
600 case 1:
601 ret = ldub_phys(cs->as, addr);
602 break;
603 case 2:
604 ret = lduw_phys(cs->as, addr);
605 break;
606 default:
607 case 4:
608 ret = ldl_phys(cs->as, addr);
609 break;
610 case 8:
611 ret = ldq_phys(cs->as, addr);
612 break;
614 break;
615 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
616 switch (size) {
617 case 1:
618 ret = ldub_phys(cs->as, (hwaddr)addr
619 | ((hwaddr)(asi & 0xf) << 32));
620 break;
621 case 2:
622 ret = lduw_phys(cs->as, (hwaddr)addr
623 | ((hwaddr)(asi & 0xf) << 32));
624 break;
625 default:
626 case 4:
627 ret = ldl_phys(cs->as, (hwaddr)addr
628 | ((hwaddr)(asi & 0xf) << 32));
629 break;
630 case 8:
631 ret = ldq_phys(cs->as, (hwaddr)addr
632 | ((hwaddr)(asi & 0xf) << 32));
633 break;
635 break;
636 case 0x30: /* Turbosparc secondary cache diagnostic */
637 case 0x31: /* Turbosparc RAM snoop */
638 case 0x32: /* Turbosparc page table descriptor diagnostic */
639 case 0x39: /* data cache diagnostic register */
640 ret = 0;
641 break;
642 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
644 int reg = (addr >> 8) & 3;
646 switch (reg) {
647 case 0: /* Breakpoint Value (Addr) */
648 ret = env->mmubpregs[reg];
649 break;
650 case 1: /* Breakpoint Mask */
651 ret = env->mmubpregs[reg];
652 break;
653 case 2: /* Breakpoint Control */
654 ret = env->mmubpregs[reg];
655 break;
656 case 3: /* Breakpoint Status */
657 ret = env->mmubpregs[reg];
658 env->mmubpregs[reg] = 0ULL;
659 break;
661 DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
662 ret);
664 break;
665 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
666 ret = env->mmubpctrv;
667 break;
668 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
669 ret = env->mmubpctrc;
670 break;
671 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
672 ret = env->mmubpctrs;
673 break;
674 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
675 ret = env->mmubpaction;
676 break;
677 case ASI_USERTXT: /* User code access, XXX */
678 default:
679 cpu_unassigned_access(cs, addr, false, false, asi, size);
680 ret = 0;
681 break;
683 if (sign) {
684 switch (size) {
685 case 1:
686 ret = (int8_t) ret;
687 break;
688 case 2:
689 ret = (int16_t) ret;
690 break;
691 case 4:
692 ret = (int32_t) ret;
693 break;
694 default:
695 break;
698 #ifdef DEBUG_ASI
699 dump_asi("read ", last_addr, asi, size, ret);
700 #endif
701 return ret;
704 void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
705 int asi, uint32_t memop)
707 int size = 1 << (memop & MO_SIZE);
708 SPARCCPU *cpu = sparc_env_get_cpu(env);
709 CPUState *cs = CPU(cpu);
711 helper_check_align(env, addr, size - 1);
712 switch (asi) {
713 case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
714 /* case ASI_LEON_CACHEREGS: Leon3 cache control */
715 switch (addr) {
716 case 0x00: /* Leon3 Cache Control */
717 case 0x08: /* Leon3 Instruction Cache config */
718 case 0x0C: /* Leon3 Data Cache config */
719 if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
720 leon3_cache_control_st(env, addr, val, size);
722 break;
724 case 0x01c00000: /* MXCC stream data register 0 */
725 if (size == 8) {
726 env->mxccdata[0] = val;
727 } else {
728 qemu_log_mask(LOG_UNIMP,
729 "%08x: unimplemented access size: %d\n", addr,
730 size);
732 break;
733 case 0x01c00008: /* MXCC stream data register 1 */
734 if (size == 8) {
735 env->mxccdata[1] = val;
736 } else {
737 qemu_log_mask(LOG_UNIMP,
738 "%08x: unimplemented access size: %d\n", addr,
739 size);
741 break;
742 case 0x01c00010: /* MXCC stream data register 2 */
743 if (size == 8) {
744 env->mxccdata[2] = val;
745 } else {
746 qemu_log_mask(LOG_UNIMP,
747 "%08x: unimplemented access size: %d\n", addr,
748 size);
750 break;
751 case 0x01c00018: /* MXCC stream data register 3 */
752 if (size == 8) {
753 env->mxccdata[3] = val;
754 } else {
755 qemu_log_mask(LOG_UNIMP,
756 "%08x: unimplemented access size: %d\n", addr,
757 size);
759 break;
760 case 0x01c00100: /* MXCC stream source */
761 if (size == 8) {
762 env->mxccregs[0] = val;
763 } else {
764 qemu_log_mask(LOG_UNIMP,
765 "%08x: unimplemented access size: %d\n", addr,
766 size);
768 env->mxccdata[0] = ldq_phys(cs->as,
769 (env->mxccregs[0] & 0xffffffffULL) +
771 env->mxccdata[1] = ldq_phys(cs->as,
772 (env->mxccregs[0] & 0xffffffffULL) +
774 env->mxccdata[2] = ldq_phys(cs->as,
775 (env->mxccregs[0] & 0xffffffffULL) +
776 16);
777 env->mxccdata[3] = ldq_phys(cs->as,
778 (env->mxccregs[0] & 0xffffffffULL) +
779 24);
780 break;
781 case 0x01c00200: /* MXCC stream destination */
782 if (size == 8) {
783 env->mxccregs[1] = val;
784 } else {
785 qemu_log_mask(LOG_UNIMP,
786 "%08x: unimplemented access size: %d\n", addr,
787 size);
789 stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 0,
790 env->mxccdata[0]);
791 stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 8,
792 env->mxccdata[1]);
793 stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 16,
794 env->mxccdata[2]);
795 stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 24,
796 env->mxccdata[3]);
797 break;
798 case 0x01c00a00: /* MXCC control register */
799 if (size == 8) {
800 env->mxccregs[3] = val;
801 } else {
802 qemu_log_mask(LOG_UNIMP,
803 "%08x: unimplemented access size: %d\n", addr,
804 size);
806 break;
807 case 0x01c00a04: /* MXCC control register */
808 if (size == 4) {
809 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
810 | val;
811 } else {
812 qemu_log_mask(LOG_UNIMP,
813 "%08x: unimplemented access size: %d\n", addr,
814 size);
816 break;
817 case 0x01c00e00: /* MXCC error register */
818 /* writing a 1 bit clears the error */
819 if (size == 8) {
820 env->mxccregs[6] &= ~val;
821 } else {
822 qemu_log_mask(LOG_UNIMP,
823 "%08x: unimplemented access size: %d\n", addr,
824 size);
826 break;
827 case 0x01c00f00: /* MBus port address register */
828 if (size == 8) {
829 env->mxccregs[7] = val;
830 } else {
831 qemu_log_mask(LOG_UNIMP,
832 "%08x: unimplemented access size: %d\n", addr,
833 size);
835 break;
836 default:
837 qemu_log_mask(LOG_UNIMP,
838 "%08x: unimplemented address, size: %d\n", addr,
839 size);
840 break;
842 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
843 asi, size, addr, val);
844 #ifdef DEBUG_MXCC
845 dump_mxcc(env);
846 #endif
847 break;
848 case ASI_M_FLUSH_PROBE: /* SuperSparc MMU flush */
849 case ASI_LEON_MMUFLUSH: /* LEON3 MMU flush */
851 int mmulev;
853 mmulev = (addr >> 8) & 15;
854 DPRINTF_MMU("mmu flush level %d\n", mmulev);
855 switch (mmulev) {
856 case 0: /* flush page */
857 tlb_flush_page(CPU(cpu), addr & 0xfffff000);
858 break;
859 case 1: /* flush segment (256k) */
860 case 2: /* flush region (16M) */
861 case 3: /* flush context (4G) */
862 case 4: /* flush entire */
863 tlb_flush(CPU(cpu), 1);
864 break;
865 default:
866 break;
868 #ifdef DEBUG_MMU
869 dump_mmu(stdout, fprintf, env);
870 #endif
872 break;
873 case ASI_M_MMUREGS: /* write MMU regs */
874 case ASI_LEON_MMUREGS: /* LEON3 write MMU regs */
876 int reg = (addr >> 8) & 0x1f;
877 uint32_t oldreg;
879 oldreg = env->mmuregs[reg];
880 switch (reg) {
881 case 0: /* Control Register */
882 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
883 (val & 0x00ffffff);
884 /* Mappings generated during no-fault mode or MMU
885 disabled mode are invalid in normal mode */
886 if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
887 (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm))) {
888 tlb_flush(CPU(cpu), 1);
890 break;
891 case 1: /* Context Table Pointer Register */
892 env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
893 break;
894 case 2: /* Context Register */
895 env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
896 if (oldreg != env->mmuregs[reg]) {
897 /* we flush when the MMU context changes because
898 QEMU has no MMU context support */
899 tlb_flush(CPU(cpu), 1);
901 break;
902 case 3: /* Synchronous Fault Status Register with Clear */
903 case 4: /* Synchronous Fault Address Register */
904 break;
905 case 0x10: /* TLB Replacement Control Register */
906 env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
907 break;
908 case 0x13: /* Synchronous Fault Status Register with Read
909 and Clear */
910 env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
911 break;
912 case 0x14: /* Synchronous Fault Address Register */
913 env->mmuregs[4] = val;
914 break;
915 default:
916 env->mmuregs[reg] = val;
917 break;
919 if (oldreg != env->mmuregs[reg]) {
920 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
921 reg, oldreg, env->mmuregs[reg]);
923 #ifdef DEBUG_MMU
924 dump_mmu(stdout, fprintf, env);
925 #endif
927 break;
928 case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
929 case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */
930 case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
931 break;
932 case ASI_USERDATA: /* User data access */
933 switch (size) {
934 case 1:
935 cpu_stb_user(env, addr, val);
936 break;
937 case 2:
938 cpu_stw_user(env, addr, val);
939 break;
940 default:
941 case 4:
942 cpu_stl_user(env, addr, val);
943 break;
944 case 8:
945 cpu_stq_user(env, addr, val);
946 break;
948 break;
949 case ASI_KERNELDATA: /* Supervisor data access */
950 case ASI_P:
951 switch (size) {
952 case 1:
953 cpu_stb_kernel(env, addr, val);
954 break;
955 case 2:
956 cpu_stw_kernel(env, addr, val);
957 break;
958 default:
959 case 4:
960 cpu_stl_kernel(env, addr, val);
961 break;
962 case 8:
963 cpu_stq_kernel(env, addr, val);
964 break;
966 break;
967 case ASI_M_TXTC_TAG: /* I-cache tag */
968 case ASI_M_TXTC_DATA: /* I-cache data */
969 case ASI_M_DATAC_TAG: /* D-cache tag */
970 case ASI_M_DATAC_DATA: /* D-cache data */
971 case ASI_M_FLUSH_PAGE: /* I/D-cache flush page */
972 case ASI_M_FLUSH_SEG: /* I/D-cache flush segment */
973 case ASI_M_FLUSH_REGION: /* I/D-cache flush region */
974 case ASI_M_FLUSH_CTX: /* I/D-cache flush context */
975 case ASI_M_FLUSH_USER: /* I/D-cache flush user */
976 break;
977 case ASI_M_BCOPY: /* Block copy, sta access */
979 /* val = src
980 addr = dst
981 copy 32 bytes */
982 unsigned int i;
983 uint32_t src = val & ~3, dst = addr & ~3, temp;
985 for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
986 temp = cpu_ldl_kernel(env, src);
987 cpu_stl_kernel(env, dst, temp);
990 break;
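    /*
     * Added note: for this block-copy ASI the store data (val) supplies the
     * source address and the effective address (addr) supplies the
     * destination, so a single sta to ASI_M_BCOPY copies one aligned
     * 32-byte line, word by word, as the loop above implements.
     */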
991 case ASI_M_BFILL: /* Block fill, stda access */
993 /* addr = dst
994 fill 32 bytes with val */
995 unsigned int i;
996 uint32_t dst = addr & ~7;
998 for (i = 0; i < 32; i += 8, dst += 8) {
999 cpu_stq_kernel(env, dst, val);
1002 break;
1003 case ASI_M_BYPASS: /* MMU passthrough */
1004 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1006 switch (size) {
1007 case 1:
1008 stb_phys(cs->as, addr, val);
1009 break;
1010 case 2:
1011 stw_phys(cs->as, addr, val);
1012 break;
1013 case 4:
1014 default:
1015 stl_phys(cs->as, addr, val);
1016 break;
1017 case 8:
1018 stq_phys(cs->as, addr, val);
1019 break;
1022 break;
1023 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1025 switch (size) {
1026 case 1:
1027 stb_phys(cs->as, (hwaddr)addr
1028 | ((hwaddr)(asi & 0xf) << 32), val);
1029 break;
1030 case 2:
1031 stw_phys(cs->as, (hwaddr)addr
1032 | ((hwaddr)(asi & 0xf) << 32), val);
1033 break;
1034 case 4:
1035 default:
1036 stl_phys(cs->as, (hwaddr)addr
1037 | ((hwaddr)(asi & 0xf) << 32), val);
1038 break;
1039 case 8:
1040 stq_phys(cs->as, (hwaddr)addr
1041 | ((hwaddr)(asi & 0xf) << 32), val);
1042 break;
1045 break;
1046 case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */
1047 case 0x31: /* store buffer data, Ross RT620 I-cache flush or
1048 Turbosparc snoop RAM */
1049 case 0x32: /* store buffer control or Turbosparc page table
1050 descriptor diagnostic */
1051 case 0x36: /* I-cache flash clear */
1052 case 0x37: /* D-cache flash clear */
1053 break;
1054 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
1056 int reg = (addr >> 8) & 3;
1058 switch (reg) {
1059 case 0: /* Breakpoint Value (Addr) */
1060 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1061 break;
1062 case 1: /* Breakpoint Mask */
1063 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1064 break;
1065 case 2: /* Breakpoint Control */
1066 env->mmubpregs[reg] = (val & 0x7fULL);
1067 break;
1068 case 3: /* Breakpoint Status */
1069 env->mmubpregs[reg] = (val & 0xfULL);
1070 break;
1072 DPRINTF_MMU("write breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
1073 env->mmubpregs[reg]);
1075 break;
1076 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
1077 env->mmubpctrv = val & 0xffffffff;
1078 break;
1079 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
1080 env->mmubpctrc = val & 0x3;
1081 break;
1082 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
1083 env->mmubpctrs = val & 0x3;
1084 break;
1085 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
1086 env->mmubpaction = val & 0x1fff;
1087 break;
1088 case ASI_USERTXT: /* User code access, XXX */
1089 case ASI_KERNELTXT: /* Supervisor code access, XXX */
1090 default:
1091 cpu_unassigned_access(CPU(sparc_env_get_cpu(env)),
1092 addr, true, false, asi, size);
1093 break;
1095 #ifdef DEBUG_ASI
1096 dump_asi("write", addr, asi, size, val);
1097 #endif
1100 #endif /* CONFIG_USER_ONLY */
1101 #else /* TARGET_SPARC64 */
1103 #ifdef CONFIG_USER_ONLY
1104 uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
1105 int asi, uint32_t memop)
1107 int size = 1 << (memop & MO_SIZE);
1108 int sign = memop & MO_SIGN;
1109 uint64_t ret = 0;
1110 #if defined(DEBUG_ASI)
1111 target_ulong last_addr = addr;
1112 #endif
1114 if (asi < 0x80) {
1115 helper_raise_exception(env, TT_PRIV_ACT);
1118 helper_check_align(env, addr, size - 1);
1119 addr = asi_address_mask(env, asi, addr);
1121 switch (asi) {
1122 case ASI_PNF: /* Primary no-fault */
1123 case ASI_PNFL: /* Primary no-fault LE */
1124 if (page_check_range(addr, size, PAGE_READ) == -1) {
1125 #ifdef DEBUG_ASI
1126 dump_asi("read ", last_addr, asi, size, ret);
1127 #endif
1128 return 0;
1130 /* Fall through */
1131 case ASI_P: /* Primary */
1132 case ASI_PL: /* Primary LE */
1134 switch (size) {
1135 case 1:
1136 ret = cpu_ldub_data(env, addr);
1137 break;
1138 case 2:
1139 ret = cpu_lduw_data(env, addr);
1140 break;
1141 case 4:
1142 ret = cpu_ldl_data(env, addr);
1143 break;
1144 default:
1145 case 8:
1146 ret = cpu_ldq_data(env, addr);
1147 break;
1150 break;
1151 case ASI_SNF: /* Secondary no-fault */
1152 case ASI_SNFL: /* Secondary no-fault LE */
1153 if (page_check_range(addr, size, PAGE_READ) == -1) {
1154 #ifdef DEBUG_ASI
1155 dump_asi("read ", last_addr, asi, size, ret);
1156 #endif
1157 return 0;
1159 /* Fall through */
1160 case ASI_S: /* Secondary */
1161 case ASI_SL: /* Secondary LE */
1162 /* XXX */
1163 break;
1164 default:
1165 break;
1168 /* Convert from little endian */
1169 switch (asi) {
1170 case ASI_PL: /* Primary LE */
1171 case ASI_SL: /* Secondary LE */
1172 case ASI_PNFL: /* Primary no-fault LE */
1173 case ASI_SNFL: /* Secondary no-fault LE */
1174 switch (size) {
1175 case 2:
1176 ret = bswap16(ret);
1177 break;
1178 case 4:
1179 ret = bswap32(ret);
1180 break;
1181 case 8:
1182 ret = bswap64(ret);
1183 break;
1184 default:
1185 break;
1187 default:
1188 break;
1191 /* Convert to signed number */
1192 if (sign) {
1193 switch (size) {
1194 case 1:
1195 ret = (int8_t) ret;
1196 break;
1197 case 2:
1198 ret = (int16_t) ret;
1199 break;
1200 case 4:
1201 ret = (int32_t) ret;
1202 break;
1203 default:
1204 break;
1207 #ifdef DEBUG_ASI
1208 dump_asi("read ", last_addr, asi, size, ret);
1209 #endif
1210 return ret;
1213 void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
1214 int asi, uint32_t memop)
1216 int size = 1 << (memop & MO_SIZE);
1217 #ifdef DEBUG_ASI
1218 dump_asi("write", addr, asi, size, val);
1219 #endif
1220 if (asi < 0x80) {
1221 helper_raise_exception(env, TT_PRIV_ACT);
1224 helper_check_align(env, addr, size - 1);
1225 addr = asi_address_mask(env, asi, addr);
1227 /* Convert to little endian */
1228 switch (asi) {
1229 case ASI_PL: /* Primary LE */
1230 case ASI_SL: /* Secondary LE */
1231 switch (size) {
1232 case 2:
1233 val = bswap16(val);
1234 break;
1235 case 4:
1236 val = bswap32(val);
1237 break;
1238 case 8:
1239 val = bswap64(val);
1240 break;
1241 default:
1242 break;
1244 default:
1245 break;
1248 switch (asi) {
1249 case ASI_P: /* Primary */
1250 case ASI_PL: /* Primary LE */
1252 switch (size) {
1253 case 1:
1254 cpu_stb_data(env, addr, val);
1255 break;
1256 case 2:
1257 cpu_stw_data(env, addr, val);
1258 break;
1259 case 4:
1260 cpu_stl_data(env, addr, val);
1261 break;
1262 case 8:
1263 default:
1264 cpu_stq_data(env, addr, val);
1265 break;
1268 break;
1269 case ASI_S: /* Secondary */
1270 case ASI_SL: /* Secondary LE */
1271 /* XXX */
1272 return;
1274 case ASI_PNF: /* Primary no-fault, RO */
1275 case ASI_SNF: /* Secondary no-fault, RO */
1276 case ASI_PNFL: /* Primary no-fault LE, RO */
1277 case ASI_SNFL: /* Secondary no-fault LE, RO */
1278 default:
1279 helper_raise_exception(env, TT_DATA_ACCESS);
1280 return;
1284 #else /* CONFIG_USER_ONLY */
1286 uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
1287 int asi, uint32_t memop)
1289 int size = 1 << (memop & MO_SIZE);
1290 int sign = memop & MO_SIGN;
1291 CPUState *cs = CPU(sparc_env_get_cpu(env));
1292 uint64_t ret = 0;
1293 #if defined(DEBUG_ASI)
1294 target_ulong last_addr = addr;
1295 #endif
1297 asi &= 0xff;
1299 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1300 || (cpu_has_hypervisor(env)
1301 && asi >= 0x30 && asi < 0x80
1302 && !(env->hpstate & HS_PRIV))) {
1303 helper_raise_exception(env, TT_PRIV_ACT);
1306 helper_check_align(env, addr, size - 1);
1307 addr = asi_address_mask(env, asi, addr);
1309 /* process nonfaulting loads first */
1310 if ((asi & 0xf6) == 0x82) {
1311 int mmu_idx;
1313 /* secondary space access has lowest asi bit equal to 1 */
1314 if (env->pstate & PS_PRIV) {
1315 mmu_idx = (asi & 1) ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX;
1316 } else {
1317 mmu_idx = (asi & 1) ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX;
1320 if (cpu_get_phys_page_nofault(env, addr, mmu_idx) == -1ULL) {
1321 #ifdef DEBUG_ASI
1322 dump_asi("read ", last_addr, asi, size, ret);
1323 #endif
1324 /* cs->exception_index is set in get_physical_address_data(). */
1325 helper_raise_exception(env, cs->exception_index);
1328 /* convert nonfaulting load ASIs to normal load ASIs */
1329 asi &= ~0x02;
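    /*
     * Added note on the encoding tested above: the non-faulting ASIs are
     * ASI_PNF (0x82), ASI_SNF (0x83), ASI_PNFL (0x8a) and ASI_SNFL (0x8b),
     * which is exactly the set matched by (asi & 0xf6) == 0x82.  Clearing
     * bit 1 afterwards turns them into the corresponding faulting ASIs
     * (ASI_P, ASI_S, ASI_PL, ASI_SL) so the switch below can share one path.
     */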
1332 switch (asi) {
1333 case ASI_AIUP: /* As if user primary */
1334 case ASI_AIUS: /* As if user secondary */
1335 case ASI_AIUPL: /* As if user primary LE */
1336 case ASI_AIUSL: /* As if user secondary LE */
1337 case ASI_P: /* Primary */
1338 case ASI_S: /* Secondary */
1339 case ASI_PL: /* Primary LE */
1340 case ASI_SL: /* Secondary LE */
1341 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1342 if (cpu_hypervisor_mode(env)) {
1343 switch (size) {
1344 case 1:
1345 ret = cpu_ldub_hypv(env, addr);
1346 break;
1347 case 2:
1348 ret = cpu_lduw_hypv(env, addr);
1349 break;
1350 case 4:
1351 ret = cpu_ldl_hypv(env, addr);
1352 break;
1353 default:
1354 case 8:
1355 ret = cpu_ldq_hypv(env, addr);
1356 break;
1358 } else {
1359 /* secondary space access has lowest asi bit equal to 1 */
1360 if (asi & 1) {
1361 switch (size) {
1362 case 1:
1363 ret = cpu_ldub_kernel_secondary(env, addr);
1364 break;
1365 case 2:
1366 ret = cpu_lduw_kernel_secondary(env, addr);
1367 break;
1368 case 4:
1369 ret = cpu_ldl_kernel_secondary(env, addr);
1370 break;
1371 default:
1372 case 8:
1373 ret = cpu_ldq_kernel_secondary(env, addr);
1374 break;
1376 } else {
1377 switch (size) {
1378 case 1:
1379 ret = cpu_ldub_kernel(env, addr);
1380 break;
1381 case 2:
1382 ret = cpu_lduw_kernel(env, addr);
1383 break;
1384 case 4:
1385 ret = cpu_ldl_kernel(env, addr);
1386 break;
1387 default:
1388 case 8:
1389 ret = cpu_ldq_kernel(env, addr);
1390 break;
1394 } else {
1395 /* secondary space access has lowest asi bit equal to 1 */
1396 if (asi & 1) {
1397 switch (size) {
1398 case 1:
1399 ret = cpu_ldub_user_secondary(env, addr);
1400 break;
1401 case 2:
1402 ret = cpu_lduw_user_secondary(env, addr);
1403 break;
1404 case 4:
1405 ret = cpu_ldl_user_secondary(env, addr);
1406 break;
1407 default:
1408 case 8:
1409 ret = cpu_ldq_user_secondary(env, addr);
1410 break;
1412 } else {
1413 switch (size) {
1414 case 1:
1415 ret = cpu_ldub_user(env, addr);
1416 break;
1417 case 2:
1418 ret = cpu_lduw_user(env, addr);
1419 break;
1420 case 4:
1421 ret = cpu_ldl_user(env, addr);
1422 break;
1423 default:
1424 case 8:
1425 ret = cpu_ldq_user(env, addr);
1426 break;
1430 break;
1431 case ASI_REAL: /* Bypass */
1432 case ASI_REAL_IO: /* Bypass, non-cacheable */
1433 case ASI_REAL_L: /* Bypass LE */
1434 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1436 switch (size) {
1437 case 1:
1438 ret = ldub_phys(cs->as, addr);
1439 break;
1440 case 2:
1441 ret = lduw_phys(cs->as, addr);
1442 break;
1443 case 4:
1444 ret = ldl_phys(cs->as, addr);
1445 break;
1446 default:
1447 case 8:
1448 ret = ldq_phys(cs->as, addr);
1449 break;
1451 break;
1453 case ASI_N: /* Nucleus */
1454 case ASI_NL: /* Nucleus Little Endian (LE) */
1456 switch (size) {
1457 case 1:
1458 ret = cpu_ldub_nucleus(env, addr);
1459 break;
1460 case 2:
1461 ret = cpu_lduw_nucleus(env, addr);
1462 break;
1463 case 4:
1464 ret = cpu_ldl_nucleus(env, addr);
1465 break;
1466 default:
1467 case 8:
1468 ret = cpu_ldq_nucleus(env, addr);
1469 break;
1471 break;
1473 case ASI_UPA_CONFIG: /* UPA config */
1474 /* XXX */
1475 break;
1476 case ASI_LSU_CONTROL: /* LSU */
1477 ret = env->lsu;
1478 break;
1479 case ASI_IMMU: /* I-MMU regs */
1481 int reg = (addr >> 3) & 0xf;
1483 if (reg == 0) {
1484 /* I-TSB Tag Target register */
1485 ret = ultrasparc_tag_target(env->immu.tag_access);
1486 } else {
1487 ret = env->immuregs[reg];
1490 break;
1492 case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer */
1494 /* env->immuregs[5] holds I-MMU TSB register value
1495 env->immuregs[6] holds I-MMU Tag Access register value */
1496 ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
1497 8*1024);
1498 break;
1500 case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer */
1502 /* env->immuregs[5] holds I-MMU TSB register value
1503 env->immuregs[6] holds I-MMU Tag Access register value */
1504 ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
1505 64*1024);
1506 break;
1508 case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
1510 int reg = (addr >> 3) & 0x3f;
1512 ret = env->itlb[reg].tte;
1513 break;
1515 case ASI_ITLB_TAG_READ: /* I-MMU tag read */
1517 int reg = (addr >> 3) & 0x3f;
1519 ret = env->itlb[reg].tag;
1520 break;
1522 case ASI_DMMU: /* D-MMU regs */
1524 int reg = (addr >> 3) & 0xf;
1526 if (reg == 0) {
1527 /* D-TSB Tag Target register */
1528 ret = ultrasparc_tag_target(env->dmmu.tag_access);
1529 } else {
1530 ret = env->dmmuregs[reg];
1532 break;
1534 case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer */
1536 /* env->dmmuregs[5] holds D-MMU TSB register value
1537 env->dmmuregs[6] holds D-MMU Tag Access register value */
1538 ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
1539 8*1024);
1540 break;
1542 case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer */
1544 /* env->dmmuregs[5] holds D-MMU TSB register value
1545 env->dmmuregs[6] holds D-MMU Tag Access register value */
1546 ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
1547 64*1024);
1548 break;
1550 case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
1552 int reg = (addr >> 3) & 0x3f;
1554 ret = env->dtlb[reg].tte;
1555 break;
1557 case ASI_DTLB_TAG_READ: /* D-MMU tag read */
1559 int reg = (addr >> 3) & 0x3f;
1561 ret = env->dtlb[reg].tag;
1562 break;
1564 case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
1565 break;
1566 case ASI_INTR_RECEIVE: /* Interrupt data receive */
1567 ret = env->ivec_status;
1568 break;
1569 case ASI_INTR_R: /* Incoming interrupt vector, RO */
1571 int reg = (addr >> 4) & 0x3;
1572 if (reg < 3) {
1573 ret = env->ivec_data[reg];
1575 break;
1577 case ASI_DCACHE_DATA: /* D-cache data */
1578 case ASI_DCACHE_TAG: /* D-cache tag access */
1579 case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
1580 case ASI_AFSR: /* E-cache asynchronous fault status */
1581 case ASI_AFAR: /* E-cache asynchronous fault address */
1582 case ASI_EC_TAG_DATA: /* E-cache tag data */
1583 case ASI_IC_INSTR: /* I-cache instruction access */
1584 case ASI_IC_TAG: /* I-cache tag access */
1585 case ASI_IC_PRE_DECODE: /* I-cache predecode */
1586 case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
1587 case ASI_EC_W: /* E-cache tag */
1588 case ASI_EC_R: /* E-cache tag */
1589 break;
1590 case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer */
1591 case ASI_ITLB_DATA_IN: /* I-MMU data in, WO */
1592 case ASI_IMMU_DEMAP: /* I-MMU demap, WO */
1593 case ASI_DTLB_DATA_IN: /* D-MMU data in, WO */
1594 case ASI_DMMU_DEMAP: /* D-MMU demap, WO */
1595 case ASI_INTR_W: /* Interrupt vector, WO */
1596 default:
1597 cpu_unassigned_access(cs, addr, false, false, 1, size);
1598 ret = 0;
1599 break;
1601 case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */
1602 case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
1603 case ASI_TWINX_AIUP: /* As if user primary, twinx */
1604 case ASI_TWINX_AIUS: /* As if user secondary, twinx */
1605 case ASI_TWINX_REAL: /* Real address, twinx */
1606 case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
1607 case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
1608 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1609 case ASI_TWINX_N: /* Nucleus, twinx */
1610 case ASI_TWINX_NL: /* Nucleus, twinx, LE */
1611 /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
1612 case ASI_TWINX_P: /* Primary, twinx */
1613 case ASI_TWINX_PL: /* Primary, twinx, LE */
1614 case ASI_TWINX_S: /* Secondary, twinx */
1615 case ASI_TWINX_SL: /* Secondary, twinx, LE */
1616 /* These are all 128-bit atomic; only ldda (now ldtxa) allowed */
1617 helper_raise_exception(env, TT_ILL_INSN);
1618 return 0;
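    /*
     * Added note: the twinx/quad ASIs rejected above are still usable, but
     * only through the 128-bit LDDA path (helper_ldda_asi below); a
     * normal-width load with one of these ASIs is an illegal instruction.
     */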
1621 /* Convert from little endian */
1622 switch (asi) {
1623 case ASI_NL: /* Nucleus Little Endian (LE) */
1624 case ASI_AIUPL: /* As if user primary LE */
1625 case ASI_AIUSL: /* As if user secondary LE */
1626 case ASI_REAL_L: /* Bypass LE */
1627 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1628 case ASI_PL: /* Primary LE */
1629 case ASI_SL: /* Secondary LE */
1630 switch(size) {
1631 case 2:
1632 ret = bswap16(ret);
1633 break;
1634 case 4:
1635 ret = bswap32(ret);
1636 break;
1637 case 8:
1638 ret = bswap64(ret);
1639 break;
1640 default:
1641 break;
1643 default:
1644 break;
1647 /* Convert to signed number */
1648 if (sign) {
1649 switch (size) {
1650 case 1:
1651 ret = (int8_t) ret;
1652 break;
1653 case 2:
1654 ret = (int16_t) ret;
1655 break;
1656 case 4:
1657 ret = (int32_t) ret;
1658 break;
1659 default:
1660 break;
1663 #ifdef DEBUG_ASI
1664 dump_asi("read ", last_addr, asi, size, ret);
1665 #endif
1666 return ret;
1669 void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
1670 int asi, uint32_t memop)
1672 int size = 1 << (memop & MO_SIZE);
1673 SPARCCPU *cpu = sparc_env_get_cpu(env);
1674 CPUState *cs = CPU(cpu);
1676 #ifdef DEBUG_ASI
1677 dump_asi("write", addr, asi, size, val);
1678 #endif
1680 asi &= 0xff;
1682 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1683 || (cpu_has_hypervisor(env)
1684 && asi >= 0x30 && asi < 0x80
1685 && !(env->hpstate & HS_PRIV))) {
1686 helper_raise_exception(env, TT_PRIV_ACT);
1689 helper_check_align(env, addr, size - 1);
1690 addr = asi_address_mask(env, asi, addr);
1692 /* Convert to little endian */
1693 switch (asi) {
1694 case ASI_NL: /* Nucleus Little Endian (LE) */
1695 case ASI_AIUPL: /* As if user primary LE */
1696 case ASI_AIUSL: /* As if user secondary LE */
1697 case ASI_REAL_L: /* Bypass LE */
1698 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1699 case ASI_PL: /* Primary LE */
1700 case ASI_SL: /* Secondary LE */
1701 switch (size) {
1702 case 2:
1703 val = bswap16(val);
1704 break;
1705 case 4:
1706 val = bswap32(val);
1707 break;
1708 case 8:
1709 val = bswap64(val);
1710 break;
1711 default:
1712 break;
1714 default:
1715 break;
1718 switch (asi) {
1719 case ASI_AIUP: /* As if user primary */
1720 case ASI_AIUS: /* As if user secondary */
1721 case ASI_AIUPL: /* As if user primary LE */
1722 case ASI_AIUSL: /* As if user secondary LE */
1723 case ASI_P: /* Primary */
1724 case ASI_S: /* Secondary */
1725 case ASI_PL: /* Primary LE */
1726 case ASI_SL: /* Secondary LE */
1727 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1728 if (cpu_hypervisor_mode(env)) {
1729 switch (size) {
1730 case 1:
1731 cpu_stb_hypv(env, addr, val);
1732 break;
1733 case 2:
1734 cpu_stw_hypv(env, addr, val);
1735 break;
1736 case 4:
1737 cpu_stl_hypv(env, addr, val);
1738 break;
1739 case 8:
1740 default:
1741 cpu_stq_hypv(env, addr, val);
1742 break;
1744 } else {
1745 /* secondary space access has lowest asi bit equal to 1 */
1746 if (asi & 1) {
1747 switch (size) {
1748 case 1:
1749 cpu_stb_kernel_secondary(env, addr, val);
1750 break;
1751 case 2:
1752 cpu_stw_kernel_secondary(env, addr, val);
1753 break;
1754 case 4:
1755 cpu_stl_kernel_secondary(env, addr, val);
1756 break;
1757 case 8:
1758 default:
1759 cpu_stq_kernel_secondary(env, addr, val);
1760 break;
1762 } else {
1763 switch (size) {
1764 case 1:
1765 cpu_stb_kernel(env, addr, val);
1766 break;
1767 case 2:
1768 cpu_stw_kernel(env, addr, val);
1769 break;
1770 case 4:
1771 cpu_stl_kernel(env, addr, val);
1772 break;
1773 case 8:
1774 default:
1775 cpu_stq_kernel(env, addr, val);
1776 break;
1780 } else {
1781 /* secondary space access has lowest asi bit equal to 1 */
1782 if (asi & 1) {
1783 switch (size) {
1784 case 1:
1785 cpu_stb_user_secondary(env, addr, val);
1786 break;
1787 case 2:
1788 cpu_stw_user_secondary(env, addr, val);
1789 break;
1790 case 4:
1791 cpu_stl_user_secondary(env, addr, val);
1792 break;
1793 case 8:
1794 default:
1795 cpu_stq_user_secondary(env, addr, val);
1796 break;
1798 } else {
1799 switch (size) {
1800 case 1:
1801 cpu_stb_user(env, addr, val);
1802 break;
1803 case 2:
1804 cpu_stw_user(env, addr, val);
1805 break;
1806 case 4:
1807 cpu_stl_user(env, addr, val);
1808 break;
1809 case 8:
1810 default:
1811 cpu_stq_user(env, addr, val);
1812 break;
1816 break;
1817 case ASI_REAL: /* Bypass */
1818 case ASI_REAL_IO: /* Bypass, non-cacheable */
1819 case ASI_REAL_L: /* Bypass LE */
1820 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1822 switch (size) {
1823 case 1:
1824 stb_phys(cs->as, addr, val);
1825 break;
1826 case 2:
1827 stw_phys(cs->as, addr, val);
1828 break;
1829 case 4:
1830 stl_phys(cs->as, addr, val);
1831 break;
1832 case 8:
1833 default:
1834 stq_phys(cs->as, addr, val);
1835 break;
1838 return;
1839 case ASI_N: /* Nucleus */
1840 case ASI_NL: /* Nucleus Little Endian (LE) */
1842 switch (size) {
1843 case 1:
1844 cpu_stb_nucleus(env, addr, val);
1845 break;
1846 case 2:
1847 cpu_stw_nucleus(env, addr, val);
1848 break;
1849 case 4:
1850 cpu_stl_nucleus(env, addr, val);
1851 break;
1852 default:
1853 case 8:
1854 cpu_stq_nucleus(env, addr, val);
1855 break;
1857 break;
1860 case ASI_UPA_CONFIG: /* UPA config */
1861 /* XXX */
1862 return;
1863 case ASI_LSU_CONTROL: /* LSU */
1865 uint64_t oldreg;
1867 oldreg = env->lsu;
1868 env->lsu = val & (DMMU_E | IMMU_E);
1869 /* Mappings generated during D/I MMU disabled mode are
1870 invalid in normal mode */
1871 if (oldreg != env->lsu) {
1872 DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
1873 oldreg, env->lsu);
1874 #ifdef DEBUG_MMU
1875 dump_mmu(stdout, fprintf, env);
1876 #endif
1877 tlb_flush(CPU(cpu), 1);
1879 return;
1881 case ASI_IMMU: /* I-MMU regs */
1883 int reg = (addr >> 3) & 0xf;
1884 uint64_t oldreg;
1886 oldreg = env->immuregs[reg];
1887 switch (reg) {
1888 case 0: /* RO */
1889 return;
1890 case 1: /* Not in I-MMU */
1891 case 2:
1892 return;
1893 case 3: /* SFSR */
1894 if ((val & 1) == 0) {
1895 val = 0; /* Clear SFSR */
1897 env->immu.sfsr = val;
1898 break;
1899 case 4: /* RO */
1900 return;
1901 case 5: /* TSB access */
1902 DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
1903 PRIx64 "\n", env->immu.tsb, val);
1904 env->immu.tsb = val;
1905 break;
1906 case 6: /* Tag access */
1907 env->immu.tag_access = val;
1908 break;
1909 case 7:
1910 case 8:
1911 return;
1912 default:
1913 break;
1916 if (oldreg != env->immuregs[reg]) {
1917 DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
1918 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
1920 #ifdef DEBUG_MMU
1921 dump_mmu(stdout, fprintf, env);
1922 #endif
1923 return;
1925 case ASI_ITLB_DATA_IN: /* I-MMU data in */
1926 replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
1927 return;
1928 case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
1930 /* TODO: auto demap */
1932 unsigned int i = (addr >> 3) & 0x3f;
1934 replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);
1936 #ifdef DEBUG_MMU
1937 DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
1938 dump_mmu(stdout, fprintf, env);
1939 #endif
1940 return;
1942 case ASI_IMMU_DEMAP: /* I-MMU demap */
1943 demap_tlb(env->itlb, addr, "immu", env);
1944 return;
1945 case ASI_DMMU: /* D-MMU regs */
1947 int reg = (addr >> 3) & 0xf;
1948 uint64_t oldreg;
1950 oldreg = env->dmmuregs[reg];
1951 switch (reg) {
1952 case 0: /* RO */
1953 case 4:
1954 return;
1955 case 3: /* SFSR */
1956 if ((val & 1) == 0) {
1957 val = 0; /* Clear SFSR, Fault address */
1958 env->dmmu.sfar = 0;
1960 env->dmmu.sfsr = val;
1961 break;
1962 case 1: /* Primary context */
1963 env->dmmu.mmu_primary_context = val;
1964 /* can be optimized to only flush MMU_USER_IDX
1965 and MMU_KERNEL_IDX entries */
1966 tlb_flush(CPU(cpu), 1);
1967 break;
1968 case 2: /* Secondary context */
1969 env->dmmu.mmu_secondary_context = val;
1970 /* can be optimized to only flush MMU_USER_SECONDARY_IDX
1971 and MMU_KERNEL_SECONDARY_IDX entries */
1972 tlb_flush(CPU(cpu), 1);
1973 break;
1974 case 5: /* TSB access */
1975 DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
1976 PRIx64 "\n", env->dmmu.tsb, val);
1977 env->dmmu.tsb = val;
1978 break;
1979 case 6: /* Tag access */
1980 env->dmmu.tag_access = val;
1981 break;
1982 case 7: /* Virtual Watchpoint */
1983 case 8: /* Physical Watchpoint */
1984 default:
1985 env->dmmuregs[reg] = val;
1986 break;
1989 if (oldreg != env->dmmuregs[reg]) {
1990 DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
1991 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
1993 #ifdef DEBUG_MMU
1994 dump_mmu(stdout, fprintf, env);
1995 #endif
1996 return;
1998 case ASI_DTLB_DATA_IN: /* D-MMU data in */
1999 replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
2000 return;
2001 case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
2003 unsigned int i = (addr >> 3) & 0x3f;
2005 replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);
2007 #ifdef DEBUG_MMU
2008 DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
2009 dump_mmu(stdout, fprintf, env);
2010 #endif
2011 return;
2013 case ASI_DMMU_DEMAP: /* D-MMU demap */
2014 demap_tlb(env->dtlb, addr, "dmmu", env);
2015 return;
2016 case ASI_INTR_RECEIVE: /* Interrupt data receive */
2017 env->ivec_status = val & 0x20;
2018 return;
2019 case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */
2020 case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
2021 case ASI_TWINX_AIUP: /* As if user primary, twinx */
2022 case ASI_TWINX_AIUS: /* As if user secondary, twinx */
2023 case ASI_TWINX_REAL: /* Real address, twinx */
2024 case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
2025 case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
2026 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
2027 case ASI_TWINX_N: /* Nucleus, twinx */
2028 case ASI_TWINX_NL: /* Nucleus, twinx, LE */
2029 /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
2030 case ASI_TWINX_P: /* Primary, twinx */
2031 case ASI_TWINX_PL: /* Primary, twinx, LE */
2032 case ASI_TWINX_S: /* Secondary, twinx */
2033 case ASI_TWINX_SL: /* Secondary, twinx, LE */
2034 /* Only stda allowed */
2035 helper_raise_exception(env, TT_ILL_INSN);
2036 return;
2037 case ASI_DCACHE_DATA: /* D-cache data */
2038 case ASI_DCACHE_TAG: /* D-cache tag access */
2039 case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
2040 case ASI_AFSR: /* E-cache asynchronous fault status */
2041 case ASI_AFAR: /* E-cache asynchronous fault address */
2042 case ASI_EC_TAG_DATA: /* E-cache tag data */
2043 case ASI_IC_INSTR: /* I-cache instruction access */
2044 case ASI_IC_TAG: /* I-cache tag access */
2045 case ASI_IC_PRE_DECODE: /* I-cache predecode */
2046 case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
2047 case ASI_EC_W: /* E-cache tag */
2048 case ASI_EC_R: /* E-cache tag */
2049 return;
2050 case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer, RO */
2051 case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer, RO */
2052 case ASI_ITLB_TAG_READ: /* I-MMU tag read, RO */
2053 case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer, RO */
2054 case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer, RO */
2055 case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer, RO */
2056 case ASI_DTLB_TAG_READ: /* D-MMU tag read, RO */
2057 case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
2058 case ASI_INTR_R: /* Incoming interrupt vector, RO */
2059 case ASI_PNF: /* Primary no-fault, RO */
2060 case ASI_SNF: /* Secondary no-fault, RO */
2061 case ASI_PNFL: /* Primary no-fault LE, RO */
2062 case ASI_SNFL: /* Secondary no-fault LE, RO */
2063 default:
2064 cpu_unassigned_access(cs, addr, true, false, 1, size);
2065 return;
2068 #endif /* CONFIG_USER_ONLY */
2070 /* 128-bit LDDA; result returned in QT0. */
2071 void helper_ldda_asi(CPUSPARCState *env, target_ulong addr, int asi)
2073 uint64_t h, l;
2075 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2076 || (cpu_has_hypervisor(env)
2077 && asi >= 0x30 && asi < 0x80
2078 && !(env->hpstate & HS_PRIV))) {
2079 helper_raise_exception(env, TT_PRIV_ACT);
2082 addr = asi_address_mask(env, asi, addr);
2084 switch (asi) {
2085 #if !defined(CONFIG_USER_ONLY)
2086 case ASI_TWINX_AIUP: /* As if user primary, twinx */
2087 case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
2088 helper_check_align(env, addr, 0xf);
2089 h = cpu_ldq_user(env, addr);
2090 l = cpu_ldq_user(env, addr + 8);
2091 break;
2092 case ASI_TWINX_AIUS: /* As if user secondary, twinx */
2093 case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
2094 helper_check_align(env, addr, 0xf);
2095 h = cpu_ldq_user_secondary(env, addr);
2096 l = cpu_ldq_user_secondary(env, addr + 8);
2097 break;
2098 case ASI_TWINX_REAL: /* Real address, twinx */
2099 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
2100 helper_check_align(env, addr, 0xf);
2102 CPUState *cs = CPU(sparc_env_get_cpu(env));
2103 h = ldq_phys(cs->as, addr);
2104 l = ldq_phys(cs->as, addr + 8);
2106 break;
2107 case ASI_NUCLEUS_QUAD_LDD:
2108 case ASI_NUCLEUS_QUAD_LDD_L:
2109 case ASI_TWINX_N: /* Nucleus, twinx */
2110 case ASI_TWINX_NL: /* Nucleus, twinx, LE */
2111 helper_check_align(env, addr, 0xf);
2112 h = cpu_ldq_nucleus(env, addr);
2113 l = cpu_ldq_nucleus(env, addr + 8);
2114 break;
2115 case ASI_TWINX_S: /* Secondary, twinx */
2116 case ASI_TWINX_SL: /* Secondary, twinx, LE */
2117 if (!cpu_hypervisor_mode(env)) {
2118 helper_check_align(env, addr, 0xf);
2119 if (env->pstate & PS_PRIV) {
2120 h = cpu_ldq_kernel_secondary(env, addr);
2121 l = cpu_ldq_kernel_secondary(env, addr + 8);
2122 } else {
2123 h = cpu_ldq_user_secondary(env, addr);
2124 l = cpu_ldq_user_secondary(env, addr + 8);
2126 break;
2128 /* fallthru */
2129 case ASI_TWINX_P: /* Primary, twinx */
2130 case ASI_TWINX_PL: /* Primary, twinx, LE */
2131 helper_check_align(env, addr, 0xf);
2132 h = cpu_ldq_data(env, addr);
2133 l = cpu_ldq_data(env, addr + 8);
2134 break;
2135 #else
2136 case ASI_TWINX_P: /* Primary, twinx */
2137 case ASI_TWINX_PL: /* Primary, twinx, LE */
2138 case ASI_TWINX_S: /* Secondary, twinx */
2139 case ASI_TWINX_SL: /* Secondary, twinx, LE */
2140 /* ??? Should be available, but we need to implement
2141 an atomic 128-bit load. */
2142 helper_raise_exception(env, TT_PRIV_ACT);
2143 #endif
2144 default:
2145 /* Non-twinx asi, so this is the legacy ldda insn, which
2146 performs two word sized operations. */
2147 /* ??? The UA2011 manual recommends emulating this with
2148 a single 64-bit load. However, LE asis *are* treated
2149 as two 32-bit loads individually byte swapped. */
2150 helper_check_align(env, addr, 0x7);
2151 QT0.high = (uint32_t)helper_ld_asi(env, addr, asi, MO_UL);
2152 QT0.low = (uint32_t)helper_ld_asi(env, addr + 4, asi, MO_UL);
2153 return;
2156 if (asi & 8) {
2157 h = bswap64(h);
2158 l = bswap64(l);
2160 QT0.high = h;
2161 QT0.low = l;
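/*
 * Added note: the "asi & 8" test above relies on every little-endian
 * twinx/quad-LDD ASI handled here differing from its big-endian counterpart
 * only in bit 3 (e.g. ASI_TWINX_P vs ASI_TWINX_PL), so one byte swap covers
 * all of the *_L cases.
 */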
2164 target_ulong helper_casx_asi(CPUSPARCState *env, target_ulong addr,
2165 target_ulong val1, target_ulong val2,
2166 uint32_t asi)
2168 target_ulong ret;
2170 ret = helper_ld_asi(env, addr, asi, MO_Q);
2171 if (val2 == ret) {
2172 helper_st_asi(env, addr, val1, asi, MO_Q);
2174 return ret;
2176 #endif /* TARGET_SPARC64 */
2178 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2179 target_ulong helper_cas_asi(CPUSPARCState *env, target_ulong addr,
2180 target_ulong val1, target_ulong val2, uint32_t asi)
2182 target_ulong ret;
2184 val2 &= 0xffffffffUL;
2185 ret = helper_ld_asi(env, addr, asi, MO_UL);
2186 ret &= 0xffffffffUL;
2187 if (val2 == ret) {
2188 helper_st_asi(env, addr, val1 & 0xffffffffUL, asi, MO_UL);
2190 return ret;
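/*
 * Added note: both CAS helpers build compare-and-swap from the plain ASI
 * load/store helpers above: the current memory word is loaded, compared
 * against val2, and val1 is stored back only if they match; the old memory
 * value is returned either way so the guest can detect failure.  The 32-bit
 * variant masks the comparison value and the result to the low 32 bits,
 * matching casa's zero-extended result.
 */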
2192 #endif /* !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) */
2194 void helper_ldqf(CPUSPARCState *env, target_ulong addr, int mem_idx)
2196 /* XXX add 128 bit load */
2197 CPU_QuadU u;
2199 helper_check_align(env, addr, 7);
2200 #if !defined(CONFIG_USER_ONLY)
2201 switch (mem_idx) {
2202 case MMU_USER_IDX:
2203 u.ll.upper = cpu_ldq_user(env, addr);
2204 u.ll.lower = cpu_ldq_user(env, addr + 8);
2205 QT0 = u.q;
2206 break;
2207 case MMU_KERNEL_IDX:
2208 u.ll.upper = cpu_ldq_kernel(env, addr);
2209 u.ll.lower = cpu_ldq_kernel(env, addr + 8);
2210 QT0 = u.q;
2211 break;
2212 #ifdef TARGET_SPARC64
2213 case MMU_HYPV_IDX:
2214 u.ll.upper = cpu_ldq_hypv(env, addr);
2215 u.ll.lower = cpu_ldq_hypv(env, addr + 8);
2216 QT0 = u.q;
2217 break;
2218 #endif
2219 default:
2220 DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx);
2221 break;
2223 #else
2224 u.ll.upper = cpu_ldq_data(env, address_mask(env, addr));
2225 u.ll.lower = cpu_ldq_data(env, address_mask(env, addr + 8));
2226 QT0 = u.q;
2227 #endif
2230 void helper_stqf(CPUSPARCState *env, target_ulong addr, int mem_idx)
2232 /* XXX add 128 bit store */
2233 CPU_QuadU u;
2235 helper_check_align(env, addr, 7);
2236 #if !defined(CONFIG_USER_ONLY)
2237 switch (mem_idx) {
2238 case MMU_USER_IDX:
2239 u.q = QT0;
2240 cpu_stq_user(env, addr, u.ll.upper);
2241 cpu_stq_user(env, addr + 8, u.ll.lower);
2242 break;
2243 case MMU_KERNEL_IDX:
2244 u.q = QT0;
2245 cpu_stq_kernel(env, addr, u.ll.upper);
2246 cpu_stq_kernel(env, addr + 8, u.ll.lower);
2247 break;
2248 #ifdef TARGET_SPARC64
2249 case MMU_HYPV_IDX:
2250 u.q = QT0;
2251 cpu_stq_hypv(env, addr, u.ll.upper);
2252 cpu_stq_hypv(env, addr + 8, u.ll.lower);
2253 break;
2254 #endif
2255 default:
2256 DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", mem_idx);
2257 break;
2259 #else
2260 u.q = QT0;
2261 cpu_stq_data(env, address_mask(env, addr), u.ll.upper);
2262 cpu_stq_data(env, address_mask(env, addr + 8), u.ll.lower);
2263 #endif
2266 #if !defined(CONFIG_USER_ONLY)
2267 #ifndef TARGET_SPARC64
2268 void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
2269 bool is_write, bool is_exec, int is_asi,
2270 unsigned size)
2272 SPARCCPU *cpu = SPARC_CPU(cs);
2273 CPUSPARCState *env = &cpu->env;
2274 int fault_type;
2276 #ifdef DEBUG_UNASSIGNED
2277 if (is_asi) {
2278 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
2279 " asi 0x%02x from " TARGET_FMT_lx "\n",
2280 is_exec ? "exec" : is_write ? "write" : "read", size,
2281 size == 1 ? "" : "s", addr, is_asi, env->pc);
2282 } else {
2283 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
2284 " from " TARGET_FMT_lx "\n",
2285 is_exec ? "exec" : is_write ? "write" : "read", size,
2286 size == 1 ? "" : "s", addr, env->pc);
2288 #endif
2289 /* Don't overwrite translation and access faults */
2290 fault_type = (env->mmuregs[3] & 0x1c) >> 2;
2291 if ((fault_type > 4) || (fault_type == 0)) {
2292 env->mmuregs[3] = 0; /* Fault status register */
2293 if (is_asi) {
2294 env->mmuregs[3] |= 1 << 16;
2296 if (env->psrs) {
2297 env->mmuregs[3] |= 1 << 5;
2299 if (is_exec) {
2300 env->mmuregs[3] |= 1 << 6;
2302 if (is_write) {
2303 env->mmuregs[3] |= 1 << 7;
2305 env->mmuregs[3] |= (5 << 2) | 2;
2306 /* SuperSPARC will never place instruction fault addresses in the FAR */
2307 if (!is_exec) {
2308 env->mmuregs[4] = addr; /* Fault address register */
2311 /* overflow (same type fault was not read before another fault) */
2312 if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) {
2313 env->mmuregs[3] |= 1;
2316 if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
2317 if (is_exec) {
2318 helper_raise_exception(env, TT_CODE_ACCESS);
2319 } else {
2320 helper_raise_exception(env, TT_DATA_ACCESS);
2324 /* flush neverland mappings created during no-fault mode,
2325 so that subsequent MMU faults report proper fault types */
2326 if (env->mmuregs[0] & MMU_NF) {
2327 tlb_flush(cs, 1);
2330 #else
2331 void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
2332 bool is_write, bool is_exec, int is_asi,
2333 unsigned size)
2335 SPARCCPU *cpu = SPARC_CPU(cs);
2336 CPUSPARCState *env = &cpu->env;
2338 #ifdef DEBUG_UNASSIGNED
2339 printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
2340 "\n", addr, env->pc);
2341 #endif
2343 if (is_exec) {
2344 helper_raise_exception(env, TT_CODE_ACCESS);
2345 } else {
2346 helper_raise_exception(env, TT_DATA_ACCESS);
2349 #endif
2350 #endif
2352 #if !defined(CONFIG_USER_ONLY)
2353 void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
2354 MMUAccessType access_type,
2355 int mmu_idx,
2356 uintptr_t retaddr)
2358 SPARCCPU *cpu = SPARC_CPU(cs);
2359 CPUSPARCState *env = &cpu->env;
2361 #ifdef DEBUG_UNALIGNED
2362 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
2363 "\n", addr, env->pc);
2364 #endif
2365 if (retaddr) {
2366 cpu_restore_state(CPU(cpu), retaddr);
2368 helper_raise_exception(env, TT_UNALIGNED);
2371 /* Try to fill the TLB and raise an exception on error. If retaddr is
2372 NULL, it means that the function was called from C code (i.e. not
2373 from generated code or from helper.c) */
2374 /* XXX: fix it to restore all registers */
2375 void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
2376 int mmu_idx, uintptr_t retaddr)
2378 int ret;
2380 ret = sparc_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
2381 if (ret) {
2382 if (retaddr) {
2383 cpu_restore_state(cs, retaddr);
2385 cpu_loop_exit(cs);
2388 #endif