target-sparc/ldst_helper.c
1 /*
2 * Helpers for loads and stores
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
20 #include "cpu.h"
21 #include "helper.h"
23 //#define DEBUG_MMU
24 //#define DEBUG_MXCC
25 //#define DEBUG_UNALIGNED
26 //#define DEBUG_UNASSIGNED
27 //#define DEBUG_ASI
28 //#define DEBUG_CACHE_CONTROL
30 #ifdef DEBUG_MMU
31 #define DPRINTF_MMU(fmt, ...) \
32 do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
33 #else
34 #define DPRINTF_MMU(fmt, ...) do {} while (0)
35 #endif
37 #ifdef DEBUG_MXCC
38 #define DPRINTF_MXCC(fmt, ...) \
39 do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
40 #else
41 #define DPRINTF_MXCC(fmt, ...) do {} while (0)
42 #endif
44 #ifdef DEBUG_ASI
45 #define DPRINTF_ASI(fmt, ...) \
46 do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
47 #endif
49 #ifdef DEBUG_CACHE_CONTROL
50 #define DPRINTF_CACHE_CONTROL(fmt, ...) \
51 do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
52 #else
53 #define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
54 #endif
56 #ifdef TARGET_SPARC64
57 #ifndef TARGET_ABI32
58 #define AM_CHECK(env1) ((env1)->pstate & PS_AM)
59 #else
60 #define AM_CHECK(env1) (1)
61 #endif
62 #endif
64 #define QT0 (env->qt0)
65 #define QT1 (env->qt1)
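/* For system emulation the generic softmmu_template.h below is expanded
   four times (SHIFT 0..3) to generate the byte, halfword, word and
   doubleword MMU load/store helpers; ALIGNED_ONLY routes misaligned
   accesses through do_unaligned_access(). */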
67 #if !defined(CONFIG_USER_ONLY)
68 #include "softmmu_exec.h"
69 #define MMUSUFFIX _mmu
70 #define ALIGNED_ONLY
72 #define SHIFT 0
73 #include "softmmu_template.h"
75 #define SHIFT 1
76 #include "softmmu_template.h"
78 #define SHIFT 2
79 #include "softmmu_template.h"
81 #define SHIFT 3
82 #include "softmmu_template.h"
83 #endif
85 #if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
86 /* Calculates TSB pointer value for fault page size 8k or 64k */
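/* The TSB index comes from the tag access VA, shifted right by 9 for 8k
   pages or 12 for 64k pages, and is merged with the TSB base under
   tsb_base_mask (widened by the TSB size field).  When the split bit is
   set, 8k and 64k entries are steered into opposite halves of a doubled
   TSB.  The low 4 bits are cleared so the pointer is aligned to a
   16-byte TSB entry. */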
87 static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
88 uint64_t tag_access_register,
89 int page_size)
91 uint64_t tsb_base = tsb_register & ~0x1fffULL;
92 int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
93 int tsb_size = tsb_register & 0xf;
95 /* discard lower 13 bits which hold tag access context */
96 uint64_t tag_access_va = tag_access_register & ~0x1fffULL;
98 /* now reorder bits */
99 uint64_t tsb_base_mask = ~0x1fffULL;
100 uint64_t va = tag_access_va;
102 /* move va bits to correct position */
103 if (page_size == 8*1024) {
104 va >>= 9;
105 } else if (page_size == 64*1024) {
106 va >>= 12;
109 if (tsb_size) {
110 tsb_base_mask <<= tsb_size;
113 /* calculate tsb_base mask and adjust va if split is in use */
114 if (tsb_split) {
115 if (page_size == 8*1024) {
116 va &= ~(1ULL << (13 + tsb_size));
117 } else if (page_size == 64*1024) {
118 va |= (1ULL << (13 + tsb_size));
120 tsb_base_mask <<= 1;
123 return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
126 /* Calculates tag target register value by reordering bits
127 in tag access register */
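/* The tag target packs the 13-bit context (tag access bits 12:0) into
   bits 60:48 and VA bits 63:22 into bits 41:0, as the formula below shows. */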
128 static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
130 return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
133 static void replace_tlb_entry(SparcTLBEntry *tlb,
134 uint64_t tlb_tag, uint64_t tlb_tte,
135 CPUSPARCState *env1)
137 target_ulong mask, size, va, offset;
139 /* flush page range if translation is valid */
140 if (TTE_IS_VALID(tlb->tte)) {
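        /* TTE bits 62:61 hold the page size code (8k/64k/512k/4M); each
           step widens the 8k page mask by a factor of 8 so the whole old
           mapping can be flushed page by page. */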
142 mask = 0xffffffffffffe000ULL;
143 mask <<= 3 * ((tlb->tte >> 61) & 3);
144 size = ~mask + 1;
146 va = tlb->tag & mask;
148 for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
149 tlb_flush_page(env1, va + offset);
153 tlb->tag = tlb_tag;
154 tlb->tte = tlb_tte;
157 static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
158 const char *strmmu, CPUSPARCState *env1)
160 unsigned int i;
161 target_ulong mask;
162 uint64_t context;
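    /* Demap operand encoding: bit 6 of the demap address selects
       demap-by-context vs. demap-by-page, bits 5:4 select which context
       register (primary/secondary/nucleus) the operation applies to. */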
164 int is_demap_context = (demap_addr >> 6) & 1;
166 /* demap context */
167 switch ((demap_addr >> 4) & 3) {
168 case 0: /* primary */
169 context = env1->dmmu.mmu_primary_context;
170 break;
171 case 1: /* secondary */
172 context = env1->dmmu.mmu_secondary_context;
173 break;
174 case 2: /* nucleus */
175 context = 0;
176 break;
177 case 3: /* reserved */
178 default:
179 return;
182 for (i = 0; i < 64; i++) {
183 if (TTE_IS_VALID(tlb[i].tte)) {
185 if (is_demap_context) {
186 /* will remove non-global entries matching context value */
187 if (TTE_IS_GLOBAL(tlb[i].tte) ||
188 !tlb_compare_context(&tlb[i], context)) {
189 continue;
191 } else {
192 /* demap page
193 will remove any entry matching VA */
194 mask = 0xffffffffffffe000ULL;
195 mask <<= 3 * ((tlb[i].tte >> 61) & 3);
197 if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
198 continue;
201 /* entry should be global or matching context value */
202 if (!TTE_IS_GLOBAL(tlb[i].tte) &&
203 !tlb_compare_context(&tlb[i], context)) {
204 continue;
208 replace_tlb_entry(&tlb[i], 0, 0, env1);
209 #ifdef DEBUG_MMU
210 DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
211 dump_mmu(stdout, fprintf, env1);
212 #endif
217 static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
218 uint64_t tlb_tag, uint64_t tlb_tte,
219 const char *strmmu, CPUSPARCState *env1)
221 unsigned int i, replace_used;
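    /* Replacement policy: prefer an invalid entry; otherwise make up to
       two passes over unlocked entries, skipping "used" ones on the first
       pass and clearing the used bits before the second. */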
223 /* Try replacing invalid entry */
224 for (i = 0; i < 64; i++) {
225 if (!TTE_IS_VALID(tlb[i].tte)) {
226 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
227 #ifdef DEBUG_MMU
228 DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
229 dump_mmu(stdout, fprintf, env1);
230 #endif
231 return;
235 /* All entries are valid, try replacing unlocked entry */
237 for (replace_used = 0; replace_used < 2; ++replace_used) {
239 /* Used entries are not replaced on first pass */
241 for (i = 0; i < 64; i++) {
242 if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {
244 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
245 #ifdef DEBUG_MMU
246 DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
247 strmmu, (replace_used ? "used" : "unused"), i);
248 dump_mmu(stdout, fprintf, env1);
249 #endif
250 return;
254 /* Now reset used bit and search for unused entries again */
256 for (i = 0; i < 64; i++) {
257 TTE_SET_UNUSED(tlb[i].tte);
261 #ifdef DEBUG_MMU
262 DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
263 #endif
264 /* error state? */
267 #endif
269 static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr)
271 #ifdef TARGET_SPARC64
272 if (AM_CHECK(env1)) {
273 addr &= 0xffffffffULL;
275 #endif
276 return addr;
279 /* returns true if access using this ASI is to have address translated by MMU
280 otherwise access is to raw physical address */
281 static inline int is_translating_asi(int asi)
283 #ifdef TARGET_SPARC64
284 /* Ultrasparc IIi translating asi
285 - note this list is defined by cpu implementation
286 */
287 switch (asi) {
288 case 0x04 ... 0x11:
289 case 0x16 ... 0x19:
290 case 0x1E ... 0x1F:
291 case 0x24 ... 0x2C:
292 case 0x70 ... 0x73:
293 case 0x78 ... 0x79:
294 case 0x80 ... 0xFF:
295 return 1;
297 default:
298 return 0;
300 #else
301 /* TODO: check sparc32 bits */
302 return 0;
303 #endif
306 static inline target_ulong asi_address_mask(CPUSPARCState *env,
307 int asi, target_ulong addr)
309 if (is_translating_asi(asi)) {
310 return address_mask(env, addr);
311 } else {
312 return addr;
316 void helper_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align)
318 if (addr & align) {
319 #ifdef DEBUG_UNALIGNED
320 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
321 "\n", addr, env->pc);
322 #endif
323 helper_raise_exception(env, TT_UNALIGNED);
327 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
328 defined(DEBUG_MXCC)
329 static void dump_mxcc(CPUSPARCState *env)
331 printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
332 "\n",
333 env->mxccdata[0], env->mxccdata[1],
334 env->mxccdata[2], env->mxccdata[3]);
335 printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
336 "\n"
337 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
338 "\n",
339 env->mxccregs[0], env->mxccregs[1],
340 env->mxccregs[2], env->mxccregs[3],
341 env->mxccregs[4], env->mxccregs[5],
342 env->mxccregs[6], env->mxccregs[7]);
344 #endif
346 #if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
347 && defined(DEBUG_ASI)
348 static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
349 uint64_t r1)
351 switch (size) {
352 case 1:
353 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
354 addr, asi, r1 & 0xff);
355 break;
356 case 2:
357 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
358 addr, asi, r1 & 0xffff);
359 break;
360 case 4:
361 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
362 addr, asi, r1 & 0xffffffff);
363 break;
364 case 8:
365 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
366 addr, asi, r1);
367 break;
370 #endif
372 #ifndef TARGET_SPARC64
373 #ifndef CONFIG_USER_ONLY
376 /* Leon3 cache control */
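/* Register map implemented below (ASI 2): 0x00 cache control register,
   0x04 instruction cache configuration (read-only), 0x08 data cache
   configuration (read-only). */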
378 static void leon3_cache_control_st(CPUSPARCState *env, target_ulong addr,
379 uint64_t val, int size)
381 DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
382 addr, val, size);
384 if (size != 4) {
385 DPRINTF_CACHE_CONTROL("32bits only\n");
386 return;
389 switch (addr) {
390 case 0x00: /* Cache control */
392 /* These values must always be read as zeros */
393 val &= ~CACHE_CTRL_FD;
394 val &= ~CACHE_CTRL_FI;
395 val &= ~CACHE_CTRL_IB;
396 val &= ~CACHE_CTRL_IP;
397 val &= ~CACHE_CTRL_DP;
399 env->cache_control = val;
400 break;
401 case 0x04: /* Instruction cache configuration */
402 case 0x08: /* Data cache configuration */
403 /* Read Only */
404 break;
405 default:
406 DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
407 break;
411 static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr,
412 int size)
414 uint64_t ret = 0;
416 if (size != 4) {
417 DPRINTF_CACHE_CONTROL("32bits only\n");
418 return 0;
421 switch (addr) {
422 case 0x00: /* Cache control */
423 ret = env->cache_control;
424 break;
426 /* Configuration registers are read-only and always return these
427 predefined values */
429 case 0x04: /* Instruction cache configuration */
430 ret = 0x10220000;
431 break;
432 case 0x08: /* Data cache configuration */
433 ret = 0x18220000;
434 break;
435 default:
436 DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
437 break;
439 DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
440 addr, ret, size);
441 return ret;
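/* sparc32 ASI loads: ASI 2 is MXCC/Leon3 cache control, 3 is the MMU
   probe, 4 reads the SRMMU registers, 9/0xa/0xb access supervisor code,
   user data and supervisor data space, and 0x20-0x2f bypass the MMU to
   physical memory (0x21-0x2f take address bits 35:32 from the ASI). */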
444 uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
445 int sign)
447 uint64_t ret = 0;
448 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
449 uint32_t last_addr = addr;
450 #endif
452 helper_check_align(env, addr, size - 1);
453 switch (asi) {
454 case 2: /* SuperSparc MXCC registers and Leon3 cache control */
455 switch (addr) {
456 case 0x00: /* Leon3 Cache Control */
457 case 0x08: /* Leon3 Instruction Cache config */
458 case 0x0C: /* Leon3 Data Cache config */
459 if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
460 ret = leon3_cache_control_ld(env, addr, size);
462 break;
463 case 0x01c00a00: /* MXCC control register */
464 if (size == 8) {
465 ret = env->mxccregs[3];
466 } else {
467 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
468 size);
470 break;
471 case 0x01c00a04: /* MXCC control register */
472 if (size == 4) {
473 ret = env->mxccregs[3];
474 } else {
475 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
476 size);
478 break;
479 case 0x01c00c00: /* Module reset register */
480 if (size == 8) {
481 ret = env->mxccregs[5];
482 /* should we do something here? */
483 } else {
484 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
485 size);
487 break;
488 case 0x01c00f00: /* MBus port address register */
489 if (size == 8) {
490 ret = env->mxccregs[7];
491 } else {
492 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
493 size);
495 break;
496 default:
497 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
498 size);
499 break;
501 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
502 "addr = %08x -> ret = %" PRIx64 ","
503 "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
504 #ifdef DEBUG_MXCC
505 dump_mxcc(env);
506 #endif
507 break;
508 case 3: /* MMU probe */
510 int mmulev;
512 mmulev = (addr >> 8) & 15;
513 if (mmulev > 4) {
514 ret = 0;
515 } else {
516 ret = mmu_probe(env, addr, mmulev);
518 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
519 addr, mmulev, ret);
521 break;
522 case 4: /* read MMU regs */
524 int reg = (addr >> 8) & 0x1f;
526 ret = env->mmuregs[reg];
527 if (reg == 3) { /* Fault status cleared on read */
528 env->mmuregs[3] = 0;
529 } else if (reg == 0x13) { /* Fault status read */
530 ret = env->mmuregs[3];
531 } else if (reg == 0x14) { /* Fault address read */
532 ret = env->mmuregs[4];
534 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
536 break;
537 case 5: /* Turbosparc ITLB Diagnostic */
538 case 6: /* Turbosparc DTLB Diagnostic */
539 case 7: /* Turbosparc IOTLB Diagnostic */
540 break;
541 case 9: /* Supervisor code access */
542 switch (size) {
543 case 1:
544 ret = cpu_ldub_code(env, addr);
545 break;
546 case 2:
547 ret = cpu_lduw_code(env, addr);
548 break;
549 default:
550 case 4:
551 ret = cpu_ldl_code(env, addr);
552 break;
553 case 8:
554 ret = cpu_ldq_code(env, addr);
555 break;
557 break;
558 case 0xa: /* User data access */
559 switch (size) {
560 case 1:
561 ret = cpu_ldub_user(env, addr);
562 break;
563 case 2:
564 ret = cpu_lduw_user(env, addr);
565 break;
566 default:
567 case 4:
568 ret = cpu_ldl_user(env, addr);
569 break;
570 case 8:
571 ret = cpu_ldq_user(env, addr);
572 break;
574 break;
575 case 0xb: /* Supervisor data access */
576 switch (size) {
577 case 1:
578 ret = cpu_ldub_kernel(env, addr);
579 break;
580 case 2:
581 ret = cpu_lduw_kernel(env, addr);
582 break;
583 default:
584 case 4:
585 ret = cpu_ldl_kernel(env, addr);
586 break;
587 case 8:
588 ret = cpu_ldq_kernel(env, addr);
589 break;
591 break;
592 case 0xc: /* I-cache tag */
593 case 0xd: /* I-cache data */
594 case 0xe: /* D-cache tag */
595 case 0xf: /* D-cache data */
596 break;
597 case 0x20: /* MMU passthrough */
598 switch (size) {
599 case 1:
600 ret = ldub_phys(addr);
601 break;
602 case 2:
603 ret = lduw_phys(addr);
604 break;
605 default:
606 case 4:
607 ret = ldl_phys(addr);
608 break;
609 case 8:
610 ret = ldq_phys(addr);
611 break;
613 break;
614 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
615 switch (size) {
616 case 1:
617 ret = ldub_phys((target_phys_addr_t)addr
618 | ((target_phys_addr_t)(asi & 0xf) << 32));
619 break;
620 case 2:
621 ret = lduw_phys((target_phys_addr_t)addr
622 | ((target_phys_addr_t)(asi & 0xf) << 32));
623 break;
624 default:
625 case 4:
626 ret = ldl_phys((target_phys_addr_t)addr
627 | ((target_phys_addr_t)(asi & 0xf) << 32));
628 break;
629 case 8:
630 ret = ldq_phys((target_phys_addr_t)addr
631 | ((target_phys_addr_t)(asi & 0xf) << 32));
632 break;
634 break;
635 case 0x30: /* Turbosparc secondary cache diagnostic */
636 case 0x31: /* Turbosparc RAM snoop */
637 case 0x32: /* Turbosparc page table descriptor diagnostic */
638 case 0x39: /* data cache diagnostic register */
639 ret = 0;
640 break;
641 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
643 int reg = (addr >> 8) & 3;
645 switch (reg) {
646 case 0: /* Breakpoint Value (Addr) */
647 ret = env->mmubpregs[reg];
648 break;
649 case 1: /* Breakpoint Mask */
650 ret = env->mmubpregs[reg];
651 break;
652 case 2: /* Breakpoint Control */
653 ret = env->mmubpregs[reg];
654 break;
655 case 3: /* Breakpoint Status */
656 ret = env->mmubpregs[reg];
657 env->mmubpregs[reg] = 0ULL;
658 break;
660 DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
661 ret);
663 break;
664 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
665 ret = env->mmubpctrv;
666 break;
667 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
668 ret = env->mmubpctrc;
669 break;
670 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
671 ret = env->mmubpctrs;
672 break;
673 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
674 ret = env->mmubpaction;
675 break;
676 case 8: /* User code access, XXX */
677 default:
678 cpu_unassigned_access(env, addr, 0, 0, asi, size);
679 ret = 0;
680 break;
682 if (sign) {
683 switch (size) {
684 case 1:
685 ret = (int8_t) ret;
686 break;
687 case 2:
688 ret = (int16_t) ret;
689 break;
690 case 4:
691 ret = (int32_t) ret;
692 break;
693 default:
694 break;
697 #ifdef DEBUG_ASI
698 dump_asi("read ", last_addr, asi, size, ret);
699 #endif
700 return ret;
703 void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
704 int size)
706 helper_check_align(env, addr, size - 1);
707 switch (asi) {
708 case 2: /* SuperSparc MXCC registers and Leon3 cache control */
709 switch (addr) {
710 case 0x00: /* Leon3 Cache Control */
711 case 0x08: /* Leon3 Instruction Cache config */
712 case 0x0C: /* Leon3 Data Cache config */
713 if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
714 leon3_cache_control_st(env, addr, val, size);
716 break;
718 case 0x01c00000: /* MXCC stream data register 0 */
719 if (size == 8) {
720 env->mxccdata[0] = val;
721 } else {
722 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
723 size);
725 break;
726 case 0x01c00008: /* MXCC stream data register 1 */
727 if (size == 8) {
728 env->mxccdata[1] = val;
729 } else {
730 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
731 size);
733 break;
734 case 0x01c00010: /* MXCC stream data register 2 */
735 if (size == 8) {
736 env->mxccdata[2] = val;
737 } else {
738 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
739 size);
741 break;
742 case 0x01c00018: /* MXCC stream data register 3 */
743 if (size == 8) {
744 env->mxccdata[3] = val;
745 } else {
746 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
747 size);
749 break;
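        /* Writing the stream source register immediately fills
           mxccdata[0..3] with 32 bytes read from the source address;
           writing the stream destination register (below) flushes
           mxccdata[0..3] back out to memory. */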
750 case 0x01c00100: /* MXCC stream source */
751 if (size == 8) {
752 env->mxccregs[0] = val;
753 } else {
754 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
755 size);
757 env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
758 0);
759 env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
760 8);
761 env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
762 16);
763 env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
764 24);
765 break;
766 case 0x01c00200: /* MXCC stream destination */
767 if (size == 8) {
768 env->mxccregs[1] = val;
769 } else {
770 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
771 size);
773 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0,
774 env->mxccdata[0]);
775 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8,
776 env->mxccdata[1]);
777 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
778 env->mxccdata[2]);
779 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
780 env->mxccdata[3]);
781 break;
782 case 0x01c00a00: /* MXCC control register */
783 if (size == 8) {
784 env->mxccregs[3] = val;
785 } else {
786 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
787 size);
789 break;
790 case 0x01c00a04: /* MXCC control register */
791 if (size == 4) {
792 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
793 | val;
794 } else {
795 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
796 size);
798 break;
799 case 0x01c00e00: /* MXCC error register */
800 /* writing a 1 bit clears the error */
801 if (size == 8) {
802 env->mxccregs[6] &= ~val;
803 } else {
804 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
805 size);
807 break;
808 case 0x01c00f00: /* MBus port address register */
809 if (size == 8) {
810 env->mxccregs[7] = val;
811 } else {
812 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
813 size);
815 break;
816 default:
817 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
818 size);
819 break;
821 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
822 asi, size, addr, val);
823 #ifdef DEBUG_MXCC
824 dump_mxcc(env);
825 #endif
826 break;
827 case 3: /* MMU flush */
829 int mmulev;
831 mmulev = (addr >> 8) & 15;
832 DPRINTF_MMU("mmu flush level %d\n", mmulev);
833 switch (mmulev) {
834 case 0: /* flush page */
835 tlb_flush_page(env, addr & 0xfffff000);
836 break;
837 case 1: /* flush segment (256k) */
838 case 2: /* flush region (16M) */
839 case 3: /* flush context (4G) */
840 case 4: /* flush entire */
841 tlb_flush(env, 1);
842 break;
843 default:
844 break;
846 #ifdef DEBUG_MMU
847 dump_mmu(stdout, fprintf, env);
848 #endif
850 break;
851 case 4: /* write MMU regs */
853 int reg = (addr >> 8) & 0x1f;
854 uint32_t oldreg;
856 oldreg = env->mmuregs[reg];
857 switch (reg) {
858 case 0: /* Control Register */
859 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
860 (val & 0x00ffffff);
861 /* Mappings generated during no-fault mode or MMU
862 disabled mode are invalid in normal mode */
863 if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
864 (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm))) {
865 tlb_flush(env, 1);
867 break;
868 case 1: /* Context Table Pointer Register */
869 env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
870 break;
871 case 2: /* Context Register */
872 env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
873 if (oldreg != env->mmuregs[reg]) {
874 /* we flush when the MMU context changes because
875 QEMU has no MMU context support */
876 tlb_flush(env, 1);
878 break;
879 case 3: /* Synchronous Fault Status Register with Clear */
880 case 4: /* Synchronous Fault Address Register */
881 break;
882 case 0x10: /* TLB Replacement Control Register */
883 env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
884 break;
885 case 0x13: /* Synchronous Fault Status Register with Read
886 and Clear */
887 env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
888 break;
889 case 0x14: /* Synchronous Fault Address Register */
890 env->mmuregs[4] = val;
891 break;
892 default:
893 env->mmuregs[reg] = val;
894 break;
896 if (oldreg != env->mmuregs[reg]) {
897 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
898 reg, oldreg, env->mmuregs[reg]);
900 #ifdef DEBUG_MMU
901 dump_mmu(stdout, fprintf, env);
902 #endif
904 break;
905 case 5: /* Turbosparc ITLB Diagnostic */
906 case 6: /* Turbosparc DTLB Diagnostic */
907 case 7: /* Turbosparc IOTLB Diagnostic */
908 break;
909 case 0xa: /* User data access */
910 switch (size) {
911 case 1:
912 cpu_stb_user(env, addr, val);
913 break;
914 case 2:
915 cpu_stw_user(env, addr, val);
916 break;
917 default:
918 case 4:
919 cpu_stl_user(env, addr, val);
920 break;
921 case 8:
922 cpu_stq_user(env, addr, val);
923 break;
925 break;
926 case 0xb: /* Supervisor data access */
927 switch (size) {
928 case 1:
929 cpu_stb_kernel(env, addr, val);
930 break;
931 case 2:
932 cpu_stw_kernel(env, addr, val);
933 break;
934 default:
935 case 4:
936 cpu_stl_kernel(env, addr, val);
937 break;
938 case 8:
939 cpu_stq_kernel(env, addr, val);
940 break;
942 break;
943 case 0xc: /* I-cache tag */
944 case 0xd: /* I-cache data */
945 case 0xe: /* D-cache tag */
946 case 0xf: /* D-cache data */
947 case 0x10: /* I/D-cache flush page */
948 case 0x11: /* I/D-cache flush segment */
949 case 0x12: /* I/D-cache flush region */
950 case 0x13: /* I/D-cache flush context */
951 case 0x14: /* I/D-cache flush user */
952 break;
953 case 0x17: /* Block copy, sta access */
955 /* val = src
956 addr = dst
957 copy 32 bytes */
958 unsigned int i;
959 uint32_t src = val & ~3, dst = addr & ~3, temp;
961 for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
962 temp = cpu_ldl_kernel(env, src);
963 cpu_stl_kernel(env, dst, temp);
966 break;
967 case 0x1f: /* Block fill, stda access */
969 /* addr = dst
970 fill 32 bytes with val */
971 unsigned int i;
972 uint32_t dst = addr & 7;
974 for (i = 0; i < 32; i += 8, dst += 8) {
975 cpu_stq_kernel(env, dst, val);
978 break;
979 case 0x20: /* MMU passthrough */
981 switch (size) {
982 case 1:
983 stb_phys(addr, val);
984 break;
985 case 2:
986 stw_phys(addr, val);
987 break;
988 case 4:
989 default:
990 stl_phys(addr, val);
991 break;
992 case 8:
993 stq_phys(addr, val);
994 break;
997 break;
998 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1000 switch (size) {
1001 case 1:
1002 stb_phys((target_phys_addr_t)addr
1003 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1004 break;
1005 case 2:
1006 stw_phys((target_phys_addr_t)addr
1007 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1008 break;
1009 case 4:
1010 default:
1011 stl_phys((target_phys_addr_t)addr
1012 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1013 break;
1014 case 8:
1015 stq_phys((target_phys_addr_t)addr
1016 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1017 break;
1020 break;
1021 case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */
1022 case 0x31: /* store buffer data, Ross RT620 I-cache flush or
1023 Turbosparc snoop RAM */
1024 case 0x32: /* store buffer control or Turbosparc page table
1025 descriptor diagnostic */
1026 case 0x36: /* I-cache flash clear */
1027 case 0x37: /* D-cache flash clear */
1028 break;
1029 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
1031 int reg = (addr >> 8) & 3;
1033 switch (reg) {
1034 case 0: /* Breakpoint Value (Addr) */
1035 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1036 break;
1037 case 1: /* Breakpoint Mask */
1038 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1039 break;
1040 case 2: /* Breakpoint Control */
1041 env->mmubpregs[reg] = (val & 0x7fULL);
1042 break;
1043 case 3: /* Breakpoint Status */
1044 env->mmubpregs[reg] = (val & 0xfULL);
1045 break;
1047 DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg,
1048 env->mmuregs[reg]);
1050 break;
1051 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
1052 env->mmubpctrv = val & 0xffffffff;
1053 break;
1054 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
1055 env->mmubpctrc = val & 0x3;
1056 break;
1057 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
1058 env->mmubpctrs = val & 0x3;
1059 break;
1060 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
1061 env->mmubpaction = val & 0x1fff;
1062 break;
1063 case 8: /* User code access, XXX */
1064 case 9: /* Supervisor code access, XXX */
1065 default:
1066 cpu_unassigned_access(env, addr, 1, 0, asi, size);
1067 break;
1069 #ifdef DEBUG_ASI
1070 dump_asi("write", addr, asi, size, val);
1071 #endif
1074 #endif /* CONFIG_USER_ONLY */
1075 #else /* TARGET_SPARC64 */
1077 #ifdef CONFIG_USER_ONLY
1078 uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
1079 int sign)
1081 uint64_t ret = 0;
1082 #if defined(DEBUG_ASI)
1083 target_ulong last_addr = addr;
1084 #endif
1086 if (asi < 0x80) {
1087 helper_raise_exception(env, TT_PRIV_ACT);
1090 helper_check_align(env, addr, size - 1);
1091 addr = asi_address_mask(env, asi, addr);
1093 switch (asi) {
1094 case 0x82: /* Primary no-fault */
1095 case 0x8a: /* Primary no-fault LE */
1096 if (page_check_range(addr, size, PAGE_READ) == -1) {
1097 #ifdef DEBUG_ASI
1098 dump_asi("read ", last_addr, asi, size, ret);
1099 #endif
1100 return 0;
1102 /* Fall through */
1103 case 0x80: /* Primary */
1104 case 0x88: /* Primary LE */
1106 switch (size) {
1107 case 1:
1108 ret = ldub_raw(addr);
1109 break;
1110 case 2:
1111 ret = lduw_raw(addr);
1112 break;
1113 case 4:
1114 ret = ldl_raw(addr);
1115 break;
1116 default:
1117 case 8:
1118 ret = ldq_raw(addr);
1119 break;
1122 break;
1123 case 0x83: /* Secondary no-fault */
1124 case 0x8b: /* Secondary no-fault LE */
1125 if (page_check_range(addr, size, PAGE_READ) == -1) {
1126 #ifdef DEBUG_ASI
1127 dump_asi("read ", last_addr, asi, size, ret);
1128 #endif
1129 return 0;
1131 /* Fall through */
1132 case 0x81: /* Secondary */
1133 case 0x89: /* Secondary LE */
1134 /* XXX */
1135 break;
1136 default:
1137 break;
1140 /* Convert from little endian */
1141 switch (asi) {
1142 case 0x88: /* Primary LE */
1143 case 0x89: /* Secondary LE */
1144 case 0x8a: /* Primary no-fault LE */
1145 case 0x8b: /* Secondary no-fault LE */
1146 switch (size) {
1147 case 2:
1148 ret = bswap16(ret);
1149 break;
1150 case 4:
1151 ret = bswap32(ret);
1152 break;
1153 case 8:
1154 ret = bswap64(ret);
1155 break;
1156 default:
1157 break;
1159 default:
1160 break;
1163 /* Convert to signed number */
1164 if (sign) {
1165 switch (size) {
1166 case 1:
1167 ret = (int8_t) ret;
1168 break;
1169 case 2:
1170 ret = (int16_t) ret;
1171 break;
1172 case 4:
1173 ret = (int32_t) ret;
1174 break;
1175 default:
1176 break;
1179 #ifdef DEBUG_ASI
1180 dump_asi("read ", last_addr, asi, size, ret);
1181 #endif
1182 return ret;
1185 void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
1186 int asi, int size)
1188 #ifdef DEBUG_ASI
1189 dump_asi("write", addr, asi, size, val);
1190 #endif
1191 if (asi < 0x80) {
1192 helper_raise_exception(env, TT_PRIV_ACT);
1195 helper_check_align(env, addr, size - 1);
1196 addr = asi_address_mask(env, asi, addr);
1198 /* Convert to little endian */
1199 switch (asi) {
1200 case 0x88: /* Primary LE */
1201 case 0x89: /* Secondary LE */
1202 switch (size) {
1203 case 2:
1204 val = bswap16(val);
1205 break;
1206 case 4:
1207 val = bswap32(val);
1208 break;
1209 case 8:
1210 val = bswap64(val);
1211 break;
1212 default:
1213 break;
1215 default:
1216 break;
1219 switch (asi) {
1220 case 0x80: /* Primary */
1221 case 0x88: /* Primary LE */
1223 switch (size) {
1224 case 1:
1225 stb_raw(addr, val);
1226 break;
1227 case 2:
1228 stw_raw(addr, val);
1229 break;
1230 case 4:
1231 stl_raw(addr, val);
1232 break;
1233 case 8:
1234 default:
1235 stq_raw(addr, val);
1236 break;
1239 break;
1240 case 0x81: /* Secondary */
1241 case 0x89: /* Secondary LE */
1242 /* XXX */
1243 return;
1245 case 0x82: /* Primary no-fault, RO */
1246 case 0x83: /* Secondary no-fault, RO */
1247 case 0x8a: /* Primary no-fault LE, RO */
1248 case 0x8b: /* Secondary no-fault LE, RO */
1249 default:
1250 helper_raise_exception(env, TT_DATA_ACCESS);
1251 return;
1255 #else /* CONFIG_USER_ONLY */
1257 uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
1258 int sign)
1260 uint64_t ret = 0;
1261 #if defined(DEBUG_ASI)
1262 target_ulong last_addr = addr;
1263 #endif
1265 asi &= 0xff;
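    /* ASIs below 0x80 require privileged mode; when a hypervisor is
       present, ASIs 0x30-0x7f are additionally restricted to
       hyperprivileged mode. */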
1267 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1268 || (cpu_has_hypervisor(env)
1269 && asi >= 0x30 && asi < 0x80
1270 && !(env->hpstate & HS_PRIV))) {
1271 helper_raise_exception(env, TT_PRIV_ACT);
1274 helper_check_align(env, addr, size - 1);
1275 addr = asi_address_mask(env, asi, addr);
1277 /* process nonfaulting loads first */
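    /* The mask below matches the four no-fault ASIs 0x82, 0x83, 0x8a and
       0x8b; on a translation miss the prepared exception is raised,
       otherwise bit 1 is cleared so the access falls through to the
       normal load path. */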
1278 if ((asi & 0xf6) == 0x82) {
1279 int mmu_idx;
1281 /* secondary space access has lowest asi bit equal to 1 */
1282 if (env->pstate & PS_PRIV) {
1283 mmu_idx = (asi & 1) ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX;
1284 } else {
1285 mmu_idx = (asi & 1) ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX;
1288 if (cpu_get_phys_page_nofault(env, addr, mmu_idx) == -1ULL) {
1289 #ifdef DEBUG_ASI
1290 dump_asi("read ", last_addr, asi, size, ret);
1291 #endif
1292 /* env->exception_index is set in get_physical_address_data(). */
1293 helper_raise_exception(env, env->exception_index);
1296 /* convert nonfaulting load ASIs to normal load ASIs */
1297 asi &= ~0x02;
1300 switch (asi) {
1301 case 0x10: /* As if user primary */
1302 case 0x11: /* As if user secondary */
1303 case 0x18: /* As if user primary LE */
1304 case 0x19: /* As if user secondary LE */
1305 case 0x80: /* Primary */
1306 case 0x81: /* Secondary */
1307 case 0x88: /* Primary LE */
1308 case 0x89: /* Secondary LE */
1309 case 0xe2: /* UA2007 Primary block init */
1310 case 0xe3: /* UA2007 Secondary block init */
1311 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1312 if (cpu_hypervisor_mode(env)) {
1313 switch (size) {
1314 case 1:
1315 ret = cpu_ldub_hypv(env, addr);
1316 break;
1317 case 2:
1318 ret = cpu_lduw_hypv(env, addr);
1319 break;
1320 case 4:
1321 ret = cpu_ldl_hypv(env, addr);
1322 break;
1323 default:
1324 case 8:
1325 ret = cpu_ldq_hypv(env, addr);
1326 break;
1328 } else {
1329 /* secondary space access has lowest asi bit equal to 1 */
1330 if (asi & 1) {
1331 switch (size) {
1332 case 1:
1333 ret = cpu_ldub_kernel_secondary(env, addr);
1334 break;
1335 case 2:
1336 ret = cpu_lduw_kernel_secondary(env, addr);
1337 break;
1338 case 4:
1339 ret = cpu_ldl_kernel_secondary(env, addr);
1340 break;
1341 default:
1342 case 8:
1343 ret = cpu_ldq_kernel_secondary(env, addr);
1344 break;
1346 } else {
1347 switch (size) {
1348 case 1:
1349 ret = cpu_ldub_kernel(env, addr);
1350 break;
1351 case 2:
1352 ret = cpu_lduw_kernel(env, addr);
1353 break;
1354 case 4:
1355 ret = cpu_ldl_kernel(env, addr);
1356 break;
1357 default:
1358 case 8:
1359 ret = cpu_ldq_kernel(env, addr);
1360 break;
1364 } else {
1365 /* secondary space access has lowest asi bit equal to 1 */
1366 if (asi & 1) {
1367 switch (size) {
1368 case 1:
1369 ret = cpu_ldub_user_secondary(env, addr);
1370 break;
1371 case 2:
1372 ret = cpu_lduw_user_secondary(env, addr);
1373 break;
1374 case 4:
1375 ret = cpu_ldl_user_secondary(env, addr);
1376 break;
1377 default:
1378 case 8:
1379 ret = cpu_ldq_user_secondary(env, addr);
1380 break;
1382 } else {
1383 switch (size) {
1384 case 1:
1385 ret = cpu_ldub_user(env, addr);
1386 break;
1387 case 2:
1388 ret = cpu_lduw_user(env, addr);
1389 break;
1390 case 4:
1391 ret = cpu_ldl_user(env, addr);
1392 break;
1393 default:
1394 case 8:
1395 ret = cpu_ldq_user(env, addr);
1396 break;
1400 break;
1401 case 0x14: /* Bypass */
1402 case 0x15: /* Bypass, non-cacheable */
1403 case 0x1c: /* Bypass LE */
1404 case 0x1d: /* Bypass, non-cacheable LE */
1406 switch (size) {
1407 case 1:
1408 ret = ldub_phys(addr);
1409 break;
1410 case 2:
1411 ret = lduw_phys(addr);
1412 break;
1413 case 4:
1414 ret = ldl_phys(addr);
1415 break;
1416 default:
1417 case 8:
1418 ret = ldq_phys(addr);
1419 break;
1421 break;
1423 case 0x24: /* Nucleus quad LDD 128 bit atomic */
1424 case 0x2c: /* Nucleus quad LDD 128 bit atomic LE
1425 Only ldda allowed */
1426 helper_raise_exception(env, TT_ILL_INSN);
1427 return 0;
1428 case 0x04: /* Nucleus */
1429 case 0x0c: /* Nucleus Little Endian (LE) */
1431 switch (size) {
1432 case 1:
1433 ret = cpu_ldub_nucleus(env, addr);
1434 break;
1435 case 2:
1436 ret = cpu_lduw_nucleus(env, addr);
1437 break;
1438 case 4:
1439 ret = cpu_ldl_nucleus(env, addr);
1440 break;
1441 default:
1442 case 8:
1443 ret = cpu_ldq_nucleus(env, addr);
1444 break;
1446 break;
1448 case 0x4a: /* UPA config */
1449 /* XXX */
1450 break;
1451 case 0x45: /* LSU */
1452 ret = env->lsu;
1453 break;
1454 case 0x50: /* I-MMU regs */
1456 int reg = (addr >> 3) & 0xf;
1458 if (reg == 0) {
1459 /* I-TSB Tag Target register */
1460 ret = ultrasparc_tag_target(env->immu.tag_access);
1461 } else {
1462 ret = env->immuregs[reg];
1465 break;
1467 case 0x51: /* I-MMU 8k TSB pointer */
1469 /* env->immuregs[5] holds I-MMU TSB register value
1470 env->immuregs[6] holds I-MMU Tag Access register value */
1471 ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
1472 8*1024);
1473 break;
1475 case 0x52: /* I-MMU 64k TSB pointer */
1477 /* env->immuregs[5] holds I-MMU TSB register value
1478 env->immuregs[6] holds I-MMU Tag Access register value */
1479 ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
1480 64*1024);
1481 break;
1483 case 0x55: /* I-MMU data access */
1485 int reg = (addr >> 3) & 0x3f;
1487 ret = env->itlb[reg].tte;
1488 break;
1490 case 0x56: /* I-MMU tag read */
1492 int reg = (addr >> 3) & 0x3f;
1494 ret = env->itlb[reg].tag;
1495 break;
1497 case 0x58: /* D-MMU regs */
1499 int reg = (addr >> 3) & 0xf;
1501 if (reg == 0) {
1502 /* D-TSB Tag Target register */
1503 ret = ultrasparc_tag_target(env->dmmu.tag_access);
1504 } else {
1505 ret = env->dmmuregs[reg];
1507 break;
1509 case 0x59: /* D-MMU 8k TSB pointer */
1511 /* env->dmmuregs[5] holds D-MMU TSB register value
1512 env->dmmuregs[6] holds D-MMU Tag Access register value */
1513 ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
1514 8*1024);
1515 break;
1517 case 0x5a: /* D-MMU 64k TSB pointer */
1519 /* env->dmmuregs[5] holds D-MMU TSB register value
1520 env->dmmuregs[6] holds D-MMU Tag Access register value */
1521 ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
1522 64*1024);
1523 break;
1525 case 0x5d: /* D-MMU data access */
1527 int reg = (addr >> 3) & 0x3f;
1529 ret = env->dtlb[reg].tte;
1530 break;
1532 case 0x5e: /* D-MMU tag read */
1534 int reg = (addr >> 3) & 0x3f;
1536 ret = env->dtlb[reg].tag;
1537 break;
1539 case 0x48: /* Interrupt dispatch, RO */
1540 break;
1541 case 0x49: /* Interrupt data receive */
1542 ret = env->ivec_status;
1543 break;
1544 case 0x7f: /* Incoming interrupt vector, RO */
1546 int reg = (addr >> 4) & 0x3;
1547 if (reg < 3) {
1548 ret = env->ivec_data[reg];
1550 break;
1552 case 0x46: /* D-cache data */
1553 case 0x47: /* D-cache tag access */
1554 case 0x4b: /* E-cache error enable */
1555 case 0x4c: /* E-cache asynchronous fault status */
1556 case 0x4d: /* E-cache asynchronous fault address */
1557 case 0x4e: /* E-cache tag data */
1558 case 0x66: /* I-cache instruction access */
1559 case 0x67: /* I-cache tag access */
1560 case 0x6e: /* I-cache predecode */
1561 case 0x6f: /* I-cache LRU etc. */
1562 case 0x76: /* E-cache tag */
1563 case 0x7e: /* E-cache tag */
1564 break;
1565 case 0x5b: /* D-MMU data pointer */
1566 case 0x54: /* I-MMU data in, WO */
1567 case 0x57: /* I-MMU demap, WO */
1568 case 0x5c: /* D-MMU data in, WO */
1569 case 0x5f: /* D-MMU demap, WO */
1570 case 0x77: /* Interrupt vector, WO */
1571 default:
1572 cpu_unassigned_access(env, addr, 0, 0, 1, size);
1573 ret = 0;
1574 break;
1577 /* Convert from little endian */
1578 switch (asi) {
1579 case 0x0c: /* Nucleus Little Endian (LE) */
1580 case 0x18: /* As if user primary LE */
1581 case 0x19: /* As if user secondary LE */
1582 case 0x1c: /* Bypass LE */
1583 case 0x1d: /* Bypass, non-cacheable LE */
1584 case 0x88: /* Primary LE */
1585 case 0x89: /* Secondary LE */
1586 switch(size) {
1587 case 2:
1588 ret = bswap16(ret);
1589 break;
1590 case 4:
1591 ret = bswap32(ret);
1592 break;
1593 case 8:
1594 ret = bswap64(ret);
1595 break;
1596 default:
1597 break;
1599 default:
1600 break;
1603 /* Convert to signed number */
1604 if (sign) {
1605 switch (size) {
1606 case 1:
1607 ret = (int8_t) ret;
1608 break;
1609 case 2:
1610 ret = (int16_t) ret;
1611 break;
1612 case 4:
1613 ret = (int32_t) ret;
1614 break;
1615 default:
1616 break;
1619 #ifdef DEBUG_ASI
1620 dump_asi("read ", last_addr, asi, size, ret);
1621 #endif
1622 return ret;
1625 void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
1626 int asi, int size)
1628 #ifdef DEBUG_ASI
1629 dump_asi("write", addr, asi, size, val);
1630 #endif
1632 asi &= 0xff;
1634 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1635 || (cpu_has_hypervisor(env)
1636 && asi >= 0x30 && asi < 0x80
1637 && !(env->hpstate & HS_PRIV))) {
1638 helper_raise_exception(env, TT_PRIV_ACT);
1641 helper_check_align(env, addr, size - 1);
1642 addr = asi_address_mask(env, asi, addr);
1644 /* Convert to little endian */
1645 switch (asi) {
1646 case 0x0c: /* Nucleus Little Endian (LE) */
1647 case 0x18: /* As if user primary LE */
1648 case 0x19: /* As if user secondary LE */
1649 case 0x1c: /* Bypass LE */
1650 case 0x1d: /* Bypass, non-cacheable LE */
1651 case 0x88: /* Primary LE */
1652 case 0x89: /* Secondary LE */
1653 switch (size) {
1654 case 2:
1655 val = bswap16(val);
1656 break;
1657 case 4:
1658 val = bswap32(val);
1659 break;
1660 case 8:
1661 val = bswap64(val);
1662 break;
1663 default:
1664 break;
1666 default:
1667 break;
1670 switch (asi) {
1671 case 0x10: /* As if user primary */
1672 case 0x11: /* As if user secondary */
1673 case 0x18: /* As if user primary LE */
1674 case 0x19: /* As if user secondary LE */
1675 case 0x80: /* Primary */
1676 case 0x81: /* Secondary */
1677 case 0x88: /* Primary LE */
1678 case 0x89: /* Secondary LE */
1679 case 0xe2: /* UA2007 Primary block init */
1680 case 0xe3: /* UA2007 Secondary block init */
1681 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1682 if (cpu_hypervisor_mode(env)) {
1683 switch (size) {
1684 case 1:
1685 cpu_stb_hypv(env, addr, val);
1686 break;
1687 case 2:
1688 cpu_stw_hypv(env, addr, val);
1689 break;
1690 case 4:
1691 cpu_stl_hypv(env, addr, val);
1692 break;
1693 case 8:
1694 default:
1695 cpu_stq_hypv(env, addr, val);
1696 break;
1698 } else {
1699 /* secondary space access has lowest asi bit equal to 1 */
1700 if (asi & 1) {
1701 switch (size) {
1702 case 1:
1703 cpu_stb_kernel_secondary(env, addr, val);
1704 break;
1705 case 2:
1706 cpu_stw_kernel_secondary(env, addr, val);
1707 break;
1708 case 4:
1709 cpu_stl_kernel_secondary(env, addr, val);
1710 break;
1711 case 8:
1712 default:
1713 cpu_stq_kernel_secondary(env, addr, val);
1714 break;
1716 } else {
1717 switch (size) {
1718 case 1:
1719 cpu_stb_kernel(env, addr, val);
1720 break;
1721 case 2:
1722 cpu_stw_kernel(env, addr, val);
1723 break;
1724 case 4:
1725 cpu_stl_kernel(env, addr, val);
1726 break;
1727 case 8:
1728 default:
1729 cpu_stq_kernel(env, addr, val);
1730 break;
1734 } else {
1735 /* secondary space access has lowest asi bit equal to 1 */
1736 if (asi & 1) {
1737 switch (size) {
1738 case 1:
1739 cpu_stb_user_secondary(env, addr, val);
1740 break;
1741 case 2:
1742 cpu_stw_user_secondary(env, addr, val);
1743 break;
1744 case 4:
1745 cpu_stl_user_secondary(env, addr, val);
1746 break;
1747 case 8:
1748 default:
1749 cpu_stq_user_secondary(env, addr, val);
1750 break;
1752 } else {
1753 switch (size) {
1754 case 1:
1755 cpu_stb_user(env, addr, val);
1756 break;
1757 case 2:
1758 cpu_stw_user(env, addr, val);
1759 break;
1760 case 4:
1761 cpu_stl_user(env, addr, val);
1762 break;
1763 case 8:
1764 default:
1765 cpu_stq_user(env, addr, val);
1766 break;
1770 break;
1771 case 0x14: /* Bypass */
1772 case 0x15: /* Bypass, non-cacheable */
1773 case 0x1c: /* Bypass LE */
1774 case 0x1d: /* Bypass, non-cacheable LE */
1776 switch (size) {
1777 case 1:
1778 stb_phys(addr, val);
1779 break;
1780 case 2:
1781 stw_phys(addr, val);
1782 break;
1783 case 4:
1784 stl_phys(addr, val);
1785 break;
1786 case 8:
1787 default:
1788 stq_phys(addr, val);
1789 break;
1792 return;
1793 case 0x24: /* Nucleus quad LDD 128 bit atomic */
1794 case 0x2c: /* Nucleus quad LDD 128 bit atomic LE
1795 Only ldda allowed */
1796 helper_raise_exception(env, TT_ILL_INSN);
1797 return;
1798 case 0x04: /* Nucleus */
1799 case 0x0c: /* Nucleus Little Endian (LE) */
1801 switch (size) {
1802 case 1:
1803 cpu_stb_nucleus(env, addr, val);
1804 break;
1805 case 2:
1806 cpu_stw_nucleus(env, addr, val);
1807 break;
1808 case 4:
1809 cpu_stl_nucleus(env, addr, val);
1810 break;
1811 default:
1812 case 8:
1813 cpu_stq_nucleus(env, addr, val);
1814 break;
1816 break;
1819 case 0x4a: /* UPA config */
1820 /* XXX */
1821 return;
1822 case 0x45: /* LSU */
1824 uint64_t oldreg;
1826 oldreg = env->lsu;
1827 env->lsu = val & (DMMU_E | IMMU_E);
1828 /* Mappings generated during D/I MMU disabled mode are
1829 invalid in normal mode */
1830 if (oldreg != env->lsu) {
1831 DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
1832 oldreg, env->lsu);
1833 #ifdef DEBUG_MMU
1834 dump_mmu(stdout, fprintf, env);
1835 #endif
1836 tlb_flush(env, 1);
1838 return;
1840 case 0x50: /* I-MMU regs */
1842 int reg = (addr >> 3) & 0xf;
1843 uint64_t oldreg;
1845 oldreg = env->immuregs[reg];
1846 switch (reg) {
1847 case 0: /* RO */
1848 return;
1849 case 1: /* Not in I-MMU */
1850 case 2:
1851 return;
1852 case 3: /* SFSR */
1853 if ((val & 1) == 0) {
1854 val = 0; /* Clear SFSR */
1856 env->immu.sfsr = val;
1857 break;
1858 case 4: /* RO */
1859 return;
1860 case 5: /* TSB access */
1861 DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
1862 PRIx64 "\n", env->immu.tsb, val);
1863 env->immu.tsb = val;
1864 break;
1865 case 6: /* Tag access */
1866 env->immu.tag_access = val;
1867 break;
1868 case 7:
1869 case 8:
1870 return;
1871 default:
1872 break;
1875 if (oldreg != env->immuregs[reg]) {
1876 DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
1877 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
1879 #ifdef DEBUG_MMU
1880 dump_mmu(stdout, fprintf, env);
1881 #endif
1882 return;
1884 case 0x54: /* I-MMU data in */
1885 replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
1886 return;
1887 case 0x55: /* I-MMU data access */
1889 /* TODO: auto demap */
1891 unsigned int i = (addr >> 3) & 0x3f;
1893 replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);
1895 #ifdef DEBUG_MMU
1896 DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
1897 dump_mmu(stdout, fprintf, env);
1898 #endif
1899 return;
1901 case 0x57: /* I-MMU demap */
1902 demap_tlb(env->itlb, addr, "immu", env);
1903 return;
1904 case 0x58: /* D-MMU regs */
1906 int reg = (addr >> 3) & 0xf;
1907 uint64_t oldreg;
1909 oldreg = env->dmmuregs[reg];
1910 switch (reg) {
1911 case 0: /* RO */
1912 case 4:
1913 return;
1914 case 3: /* SFSR */
1915 if ((val & 1) == 0) {
1916 val = 0; /* Clear SFSR, Fault address */
1917 env->dmmu.sfar = 0;
1919 env->dmmu.sfsr = val;
1920 break;
1921 case 1: /* Primary context */
1922 env->dmmu.mmu_primary_context = val;
1923 /* can be optimized to only flush MMU_USER_IDX
1924 and MMU_KERNEL_IDX entries */
1925 tlb_flush(env, 1);
1926 break;
1927 case 2: /* Secondary context */
1928 env->dmmu.mmu_secondary_context = val;
1929 /* can be optimized to only flush MMU_USER_SECONDARY_IDX
1930 and MMU_KERNEL_SECONDARY_IDX entries */
1931 tlb_flush(env, 1);
1932 break;
1933 case 5: /* TSB access */
1934 DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
1935 PRIx64 "\n", env->dmmu.tsb, val);
1936 env->dmmu.tsb = val;
1937 break;
1938 case 6: /* Tag access */
1939 env->dmmu.tag_access = val;
1940 break;
1941 case 7: /* Virtual Watchpoint */
1942 case 8: /* Physical Watchpoint */
1943 default:
1944 env->dmmuregs[reg] = val;
1945 break;
1948 if (oldreg != env->dmmuregs[reg]) {
1949 DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
1950 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
1952 #ifdef DEBUG_MMU
1953 dump_mmu(stdout, fprintf, env);
1954 #endif
1955 return;
1957 case 0x5c: /* D-MMU data in */
1958 replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
1959 return;
1960 case 0x5d: /* D-MMU data access */
1962 unsigned int i = (addr >> 3) & 0x3f;
1964 replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);
1966 #ifdef DEBUG_MMU
1967 DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
1968 dump_mmu(stdout, fprintf, env);
1969 #endif
1970 return;
1972 case 0x5f: /* D-MMU demap */
1973 demap_tlb(env->dtlb, addr, "dmmu", env);
1974 return;
1975 case 0x49: /* Interrupt data receive */
1976 env->ivec_status = val & 0x20;
1977 return;
1978 case 0x46: /* D-cache data */
1979 case 0x47: /* D-cache tag access */
1980 case 0x4b: /* E-cache error enable */
1981 case 0x4c: /* E-cache asynchronous fault status */
1982 case 0x4d: /* E-cache asynchronous fault address */
1983 case 0x4e: /* E-cache tag data */
1984 case 0x66: /* I-cache instruction access */
1985 case 0x67: /* I-cache tag access */
1986 case 0x6e: /* I-cache predecode */
1987 case 0x6f: /* I-cache LRU etc. */
1988 case 0x76: /* E-cache tag */
1989 case 0x7e: /* E-cache tag */
1990 return;
1991 case 0x51: /* I-MMU 8k TSB pointer, RO */
1992 case 0x52: /* I-MMU 64k TSB pointer, RO */
1993 case 0x56: /* I-MMU tag read, RO */
1994 case 0x59: /* D-MMU 8k TSB pointer, RO */
1995 case 0x5a: /* D-MMU 64k TSB pointer, RO */
1996 case 0x5b: /* D-MMU data pointer, RO */
1997 case 0x5e: /* D-MMU tag read, RO */
1998 case 0x48: /* Interrupt dispatch, RO */
1999 case 0x7f: /* Incoming interrupt vector, RO */
2000 case 0x82: /* Primary no-fault, RO */
2001 case 0x83: /* Secondary no-fault, RO */
2002 case 0x8a: /* Primary no-fault LE, RO */
2003 case 0x8b: /* Secondary no-fault LE, RO */
2004 default:
2005 cpu_unassigned_access(env, addr, 1, 0, 1, size);
2006 return;
2009 #endif /* CONFIG_USER_ONLY */
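/* ldda: loads an aligned doubleword (or, for the nucleus quad ASIs, a
   128-bit quantity) into an even/odd register pair; rd == 0 only writes
   the odd half because %g0 is hardwired to zero. */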
2011 void helper_ldda_asi(CPUSPARCState *env, target_ulong addr, int asi, int rd)
2013 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2014 || (cpu_has_hypervisor(env)
2015 && asi >= 0x30 && asi < 0x80
2016 && !(env->hpstate & HS_PRIV))) {
2017 helper_raise_exception(env, TT_PRIV_ACT);
2020 addr = asi_address_mask(env, asi, addr);
2022 switch (asi) {
2023 #if !defined(CONFIG_USER_ONLY)
2024 case 0x24: /* Nucleus quad LDD 128 bit atomic */
2025 case 0x2c: /* Nucleus quad LDD 128 bit atomic LE */
2026 helper_check_align(env, addr, 0xf);
2027 if (rd == 0) {
2028 env->gregs[1] = cpu_ldq_nucleus(env, addr + 8);
2029 if (asi == 0x2c) {
2030 bswap64s(&env->gregs[1]);
2032 } else if (rd < 8) {
2033 env->gregs[rd] = cpu_ldq_nucleus(env, addr);
2034 env->gregs[rd + 1] = cpu_ldq_nucleus(env, addr + 8);
2035 if (asi == 0x2c) {
2036 bswap64s(&env->gregs[rd]);
2037 bswap64s(&env->gregs[rd + 1]);
2039 } else {
2040 env->regwptr[rd] = cpu_ldq_nucleus(env, addr);
2041 env->regwptr[rd + 1] = cpu_ldq_nucleus(env, addr + 8);
2042 if (asi == 0x2c) {
2043 bswap64s(&env->regwptr[rd]);
2044 bswap64s(&env->regwptr[rd + 1]);
2047 break;
2048 #endif
2049 default:
2050 helper_check_align(env, addr, 0x3);
2051 if (rd == 0) {
2052 env->gregs[1] = helper_ld_asi(env, addr + 4, asi, 4, 0);
2053 } else if (rd < 8) {
2054 env->gregs[rd] = helper_ld_asi(env, addr, asi, 4, 0);
2055 env->gregs[rd + 1] = helper_ld_asi(env, addr + 4, asi, 4, 0);
2056 } else {
2057 env->regwptr[rd] = helper_ld_asi(env, addr, asi, 4, 0);
2058 env->regwptr[rd + 1] = helper_ld_asi(env, addr + 4, asi, 4, 0);
2060 break;
2064 void helper_ldf_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
2065 int rd)
2067 unsigned int i;
2068 target_ulong val;
2070 helper_check_align(env, addr, 3);
2071 addr = asi_address_mask(env, asi, addr);
2073 switch (asi) {
2074 case 0xf0: /* UA2007/JPS1 Block load primary */
2075 case 0xf1: /* UA2007/JPS1 Block load secondary */
2076 case 0xf8: /* UA2007/JPS1 Block load primary LE */
2077 case 0xf9: /* UA2007/JPS1 Block load secondary LE */
2078 if (rd & 7) {
2079 helper_raise_exception(env, TT_ILL_INSN);
2080 return;
2082 helper_check_align(env, addr, 0x3f);
2083 for (i = 0; i < 8; i++, rd += 2, addr += 8) {
2084 env->fpr[rd / 2].ll = helper_ld_asi(env, addr, asi & 0x8f, 8, 0);
2086 return;
2088 case 0x16: /* UA2007 Block load primary, user privilege */
2089 case 0x17: /* UA2007 Block load secondary, user privilege */
2090 case 0x1e: /* UA2007 Block load primary LE, user privilege */
2091 case 0x1f: /* UA2007 Block load secondary LE, user privilege */
2092 case 0x70: /* JPS1 Block load primary, user privilege */
2093 case 0x71: /* JPS1 Block load secondary, user privilege */
2094 case 0x78: /* JPS1 Block load primary LE, user privilege */
2095 case 0x79: /* JPS1 Block load secondary LE, user privilege */
2096 if (rd & 7) {
2097 helper_raise_exception(env, TT_ILL_INSN);
2098 return;
2100 helper_check_align(env, addr, 0x3f);
2101 for (i = 0; i < 8; i++, rd += 2, addr += 8) {
2102 env->fpr[rd / 2].ll = helper_ld_asi(env, addr, asi & 0x19, 8, 0);
2104 return;
2106 default:
2107 break;
2110 switch (size) {
2111 default:
2112 case 4:
2113 val = helper_ld_asi(env, addr, asi, size, 0);
2114 if (rd & 1) {
2115 env->fpr[rd / 2].l.lower = val;
2116 } else {
2117 env->fpr[rd / 2].l.upper = val;
2119 break;
2120 case 8:
2121 env->fpr[rd / 2].ll = helper_ld_asi(env, addr, asi, size, 0);
2122 break;
2123 case 16:
2124 env->fpr[rd / 2].ll = helper_ld_asi(env, addr, asi, 8, 0);
2125 env->fpr[rd / 2 + 1].ll = helper_ld_asi(env, addr + 8, asi, 8, 0);
2126 break;
2130 void helper_stf_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
2131 int rd)
2133 unsigned int i;
2134 target_ulong val;
2136 helper_check_align(env, addr, 3);
2137 addr = asi_address_mask(env, asi, addr);
2139 switch (asi) {
2140 case 0xe0: /* UA2007/JPS1 Block commit store primary (cache flush) */
2141 case 0xe1: /* UA2007/JPS1 Block commit store secondary (cache flush) */
2142 case 0xf0: /* UA2007/JPS1 Block store primary */
2143 case 0xf1: /* UA2007/JPS1 Block store secondary */
2144 case 0xf8: /* UA2007/JPS1 Block store primary LE */
2145 case 0xf9: /* UA2007/JPS1 Block store secondary LE */
2146 if (rd & 7) {
2147 helper_raise_exception(env, TT_ILL_INSN);
2148 return;
2150 helper_check_align(env, addr, 0x3f);
2151 for (i = 0; i < 8; i++, rd += 2, addr += 8) {
2152 helper_st_asi(env, addr, env->fpr[rd / 2].ll, asi & 0x8f, 8);
2155 return;
2156 case 0x16: /* UA2007 Block store primary, user privilege */
2157 case 0x17: /* UA2007 Block store secondary, user privilege */
2158 case 0x1e: /* UA2007 Block store primary LE, user privilege */
2159 case 0x1f: /* UA2007 Block store secondary LE, user privilege */
2160 case 0x70: /* JPS1 Block store primary, user privilege */
2161 case 0x71: /* JPS1 Block store secondary, user privilege */
2162 case 0x78: /* JPS1 Block store primary LE, user privilege */
2163 case 0x79: /* JPS1 Block store secondary LE, user privilege */
2164 if (rd & 7) {
2165 helper_raise_exception(env, TT_ILL_INSN);
2166 return;
2168 helper_check_align(env, addr, 0x3f);
2169 for (i = 0; i < 8; i++, rd += 2, addr += 8) {
2170 helper_st_asi(env, addr, env->fpr[rd / 2].ll, asi & 0x19, 8);
2173 return;
2174 default:
2175 break;
2178 switch (size) {
2179 default:
2180 case 4:
2181 if (rd & 1) {
2182 val = env->fpr[rd / 2].l.lower;
2183 } else {
2184 val = env->fpr[rd / 2].l.upper;
2186 helper_st_asi(env, addr, val, asi, size);
2187 break;
2188 case 8:
2189 helper_st_asi(env, addr, env->fpr[rd / 2].ll, asi, size);
2190 break;
2191 case 16:
2192 helper_st_asi(env, addr, env->fpr[rd / 2].ll, asi, 8);
2193 helper_st_asi(env, addr + 8, env->fpr[rd / 2 + 1].ll, asi, 8);
2194 break;
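/* casa/casxa: the old memory value is loaded, the store of val1 happens
   only if the old value equals the comparison operand val2, and the old
   value is returned in either case. */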
2198 target_ulong helper_cas_asi(CPUSPARCState *env, target_ulong addr,
2199 target_ulong val1, target_ulong val2, uint32_t asi)
2201 target_ulong ret;
2203 val2 &= 0xffffffffUL;
2204 ret = helper_ld_asi(env, addr, asi, 4, 0);
2205 ret &= 0xffffffffUL;
2206 if (val2 == ret) {
2207 helper_st_asi(env, addr, val1 & 0xffffffffUL, asi, 4);
2209 return ret;
2212 target_ulong helper_casx_asi(CPUSPARCState *env, target_ulong addr,
2213 target_ulong val1, target_ulong val2,
2214 uint32_t asi)
2216 target_ulong ret;
2218 ret = helper_ld_asi(env, addr, asi, 8, 0);
2219 if (val2 == ret) {
2220 helper_st_asi(env, addr, val1, asi, 8);
2222 return ret;
2224 #endif /* TARGET_SPARC64 */
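/* The quad floating-point load/store helpers below emulate the 128-bit
   access as two 64-bit memory operations through the QT0 temporary,
   dispatched on the current MMU index. */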
2226 void helper_ldqf(CPUSPARCState *env, target_ulong addr, int mem_idx)
2228 /* XXX add 128 bit load */
2229 CPU_QuadU u;
2231 helper_check_align(env, addr, 7);
2232 #if !defined(CONFIG_USER_ONLY)
2233 switch (mem_idx) {
2234 case MMU_USER_IDX:
2235 u.ll.upper = cpu_ldq_user(env, addr);
2236 u.ll.lower = cpu_ldq_user(env, addr + 8);
2237 QT0 = u.q;
2238 break;
2239 case MMU_KERNEL_IDX:
2240 u.ll.upper = cpu_ldq_kernel(env, addr);
2241 u.ll.lower = cpu_ldq_kernel(env, addr + 8);
2242 QT0 = u.q;
2243 break;
2244 #ifdef TARGET_SPARC64
2245 case MMU_HYPV_IDX:
2246 u.ll.upper = cpu_ldq_hypv(env, addr);
2247 u.ll.lower = cpu_ldq_hypv(env, addr + 8);
2248 QT0 = u.q;
2249 break;
2250 #endif
2251 default:
2252 DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx);
2253 break;
2255 #else
2256 u.ll.upper = ldq_raw(address_mask(env, addr));
2257 u.ll.lower = ldq_raw(address_mask(env, addr + 8));
2258 QT0 = u.q;
2259 #endif
2262 void helper_stqf(CPUSPARCState *env, target_ulong addr, int mem_idx)
2264 /* XXX add 128 bit store */
2265 CPU_QuadU u;
2267 helper_check_align(env, addr, 7);
2268 #if !defined(CONFIG_USER_ONLY)
2269 switch (mem_idx) {
2270 case MMU_USER_IDX:
2271 u.q = QT0;
2272 cpu_stq_user(env, addr, u.ll.upper);
2273 cpu_stq_user(env, addr + 8, u.ll.lower);
2274 break;
2275 case MMU_KERNEL_IDX:
2276 u.q = QT0;
2277 cpu_stq_kernel(env, addr, u.ll.upper);
2278 cpu_stq_kernel(env, addr + 8, u.ll.lower);
2279 break;
2280 #ifdef TARGET_SPARC64
2281 case MMU_HYPV_IDX:
2282 u.q = QT0;
2283 cpu_stq_hypv(env, addr, u.ll.upper);
2284 cpu_stq_hypv(env, addr + 8, u.ll.lower);
2285 break;
2286 #endif
2287 default:
2288 DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", mem_idx);
2289 break;
2291 #else
2292 u.q = QT0;
2293 stq_raw(address_mask(env, addr), u.ll.upper);
2294 stq_raw(address_mask(env, addr + 8), u.ll.lower);
2295 #endif
2298 #if !defined(CONFIG_USER_ONLY)
2299 #ifndef TARGET_SPARC64
2300 void cpu_unassigned_access(CPUSPARCState *env, target_phys_addr_t addr,
2301 int is_write, int is_exec, int is_asi, int size)
2303 int fault_type;
2305 #ifdef DEBUG_UNASSIGNED
2306 if (is_asi) {
2307 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
2308 " asi 0x%02x from " TARGET_FMT_lx "\n",
2309 is_exec ? "exec" : is_write ? "write" : "read", size,
2310 size == 1 ? "" : "s", addr, is_asi, env->pc);
2311 } else {
2312 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
2313 " from " TARGET_FMT_lx "\n",
2314 is_exec ? "exec" : is_write ? "write" : "read", size,
2315 size == 1 ? "" : "s", addr, env->pc);
2317 #endif
2318 /* Don't overwrite translation and access faults */
2319 fault_type = (env->mmuregs[3] & 0x1c) >> 2;
2320 if ((fault_type > 4) || (fault_type == 0)) {
2321 env->mmuregs[3] = 0; /* Fault status register */
2322 if (is_asi) {
2323 env->mmuregs[3] |= 1 << 16;
2325 if (env->psrs) {
2326 env->mmuregs[3] |= 1 << 5;
2328 if (is_exec) {
2329 env->mmuregs[3] |= 1 << 6;
2331 if (is_write) {
2332 env->mmuregs[3] |= 1 << 7;
2334 env->mmuregs[3] |= (5 << 2) | 2;
2335 /* SuperSPARC will never place instruction fault addresses in the FAR */
2336 if (!is_exec) {
2337 env->mmuregs[4] = addr; /* Fault address register */
2340 /* overflow (same type fault was not read before another fault) */
2341 if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) {
2342 env->mmuregs[3] |= 1;
2345 if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
2346 if (is_exec) {
2347 helper_raise_exception(env, TT_CODE_ACCESS);
2348 } else {
2349 helper_raise_exception(env, TT_DATA_ACCESS);
2353 /* flush neverland mappings created during no-fault mode,
2354 so that subsequent MMU faults report proper fault types */
2355 if (env->mmuregs[0] & MMU_NF) {
2356 tlb_flush(env, 1);
2359 #else
2360 void cpu_unassigned_access(CPUSPARCState *env, target_phys_addr_t addr,
2361 int is_write, int is_exec, int is_asi, int size)
2363 #ifdef DEBUG_UNASSIGNED
2364 printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
2365 "\n", addr, env->pc);
2366 #endif
2368 if (is_exec) {
2369 helper_raise_exception(env, TT_CODE_ACCESS);
2370 } else {
2371 helper_raise_exception(env, TT_DATA_ACCESS);
2374 #endif
2375 #endif
2377 #if !defined(CONFIG_USER_ONLY)
2378 /* XXX: make it generic ? */
2379 static void cpu_restore_state2(CPUSPARCState *env, uintptr_t retaddr)
2381 TranslationBlock *tb;
2383 if (retaddr) {
2384 /* now we have a real cpu fault */
2385 tb = tb_find_pc(retaddr);
2386 if (tb) {
2387 /* the PC is inside the translated code. It means that we have
2388 a virtual CPU fault */
2389 cpu_restore_state(tb, env, retaddr);
2394 void do_unaligned_access(CPUSPARCState *env, target_ulong addr, int is_write,
2395 int is_user, uintptr_t retaddr)
2397 #ifdef DEBUG_UNALIGNED
2398 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
2399 "\n", addr, env->pc);
2400 #endif
2401 cpu_restore_state2(env, retaddr);
2402 helper_raise_exception(env, TT_UNALIGNED);
2405 /* try to fill the TLB and return an exception if error. If retaddr is
2406 NULL, it means that the function was called in C code (i.e. not
2407 from generated code or from helper.c) */
2408 /* XXX: fix it to restore all registers */
2409 void tlb_fill(CPUSPARCState *env, target_ulong addr, int is_write, int mmu_idx,
2410 uintptr_t retaddr)
2412 int ret;
2414 ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx);
2415 if (ret) {
2416 cpu_restore_state2(env, retaddr);
2417 cpu_loop_exit(env);
2420 #endif