target-sparc/ldst_helper.c
/*
 * Helpers for loads and stores
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "dyngen-exec.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif
//#define DEBUG_MMU
//#define DEBUG_MXCC
//#define DEBUG_UNALIGNED
//#define DEBUG_UNASSIGNED
//#define DEBUG_ASI
//#define DEBUG_CACHE_CONTROL

#ifdef DEBUG_MMU
#define DPRINTF_MMU(fmt, ...) \
    do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_MMU(fmt, ...) do {} while (0)
#endif

#ifdef DEBUG_MXCC
#define DPRINTF_MXCC(fmt, ...) \
    do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_MXCC(fmt, ...) do {} while (0)
#endif

#ifdef DEBUG_ASI
#define DPRINTF_ASI(fmt, ...) \
    do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
#endif

#ifdef DEBUG_CACHE_CONTROL
#define DPRINTF_CACHE_CONTROL(fmt, ...) \
    do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
#endif
#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
#else
#define AM_CHECK(env1) (1)
#endif
#endif

#define QT0 (env->qt0)
#define QT1 (env->qt1)

#if !defined(CONFIG_USER_ONLY)
static void do_unassigned_access(target_phys_addr_t addr, int is_write,
                                 int is_exec, int is_asi, int size);
#else
#ifdef TARGET_SPARC64
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
                                 int is_asi, int size);
#endif
#endif
#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
/* Calculates TSB pointer value for fault page size 8k or 64k */
static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
                                       uint64_t tag_access_register,
                                       int page_size)
{
    uint64_t tsb_base = tsb_register & ~0x1fffULL;
    int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
    int tsb_size = tsb_register & 0xf;

    /* discard lower 13 bits which hold tag access context */
    uint64_t tag_access_va = tag_access_register & ~0x1fffULL;

    /* now reorder bits */
    uint64_t tsb_base_mask = ~0x1fffULL;
    uint64_t va = tag_access_va;

    /* move va bits to correct position */
    if (page_size == 8*1024) {
        va >>= 9;
    } else if (page_size == 64*1024) {
        va >>= 12;
    }

    if (tsb_size) {
        tsb_base_mask <<= tsb_size;
    }

    /* calculate tsb_base mask and adjust va if split is in use */
    if (tsb_split) {
        if (page_size == 8*1024) {
            va &= ~(1ULL << (13 + tsb_size));
        } else if (page_size == 64*1024) {
            va |= (1ULL << (13 + tsb_size));
        }
        tsb_base_mask <<= 1;
    }

    return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
}
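
/* Worked example (illustrative note, not part of the original source): with
 * tsb_size = 0, no split and an 8k fault page, tsb_base_mask stays ~0x1fff,
 * va becomes (VA & ~0x1fff) >> 9, and the returned pointer is
 *     (TSB base) | (VA[21:13] << 4)
 * i.e. a 16-byte TTE slot indexed by VA bits 21..13 of a 512-entry TSB.
 */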

/* Calculates tag target register value by reordering bits
   in tag access register */
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
{
    return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
}

static void replace_tlb_entry(SparcTLBEntry *tlb,
                              uint64_t tlb_tag, uint64_t tlb_tte,
                              CPUState *env1)
{
    target_ulong mask, size, va, offset;

    /* flush page range if translation is valid */
    if (TTE_IS_VALID(tlb->tte)) {

        mask = 0xffffffffffffe000ULL;
        mask <<= 3 * ((tlb->tte >> 61) & 3);
        size = ~mask + 1;

        va = tlb->tag & mask;

        for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
            tlb_flush_page(env1, va + offset);
        }
    }

    tlb->tag = tlb_tag;
    tlb->tte = tlb_tte;
}

static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
                      const char *strmmu, CPUState *env1)
{
    unsigned int i;
    target_ulong mask;
    uint64_t context;

    int is_demap_context = (demap_addr >> 6) & 1;

    /* demap context */
    switch ((demap_addr >> 4) & 3) {
    case 0: /* primary */
        context = env1->dmmu.mmu_primary_context;
        break;
    case 1: /* secondary */
        context = env1->dmmu.mmu_secondary_context;
        break;
    case 2: /* nucleus */
        context = 0;
        break;
    case 3: /* reserved */
    default:
        return;
    }

    for (i = 0; i < 64; i++) {
        if (TTE_IS_VALID(tlb[i].tte)) {

            if (is_demap_context) {
                /* will remove non-global entries matching context value */
                if (TTE_IS_GLOBAL(tlb[i].tte) ||
                    !tlb_compare_context(&tlb[i], context)) {
                    continue;
                }
            } else {
                /* demap page
                   will remove any entry matching VA */
                mask = 0xffffffffffffe000ULL;
                mask <<= 3 * ((tlb[i].tte >> 61) & 3);

                if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
                    continue;
                }

                /* entry should be global or matching context value */
                if (!TTE_IS_GLOBAL(tlb[i].tte) &&
                    !tlb_compare_context(&tlb[i], context)) {
                    continue;
                }
            }

            replace_tlb_entry(&tlb[i], 0, 0, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
            dump_mmu(stdout, fprintf, env1);
#endif
        }
    }
}
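
/* Note (derived from the code above, wording not from the original source):
 * the demap address encodes the operation - bit 6 selects "demap context"
 * (drop all non-global entries of the selected context) versus "demap page"
 * (drop entries matching the VA), and bits 5:4 select the primary, secondary
 * or nucleus context; the reserved encoding 3 is ignored.
 */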

static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
                                 uint64_t tlb_tag, uint64_t tlb_tte,
                                 const char *strmmu, CPUState *env1)
{
    unsigned int i, replace_used;

    /* Try replacing invalid entry */
    for (i = 0; i < 64; i++) {
        if (!TTE_IS_VALID(tlb[i].tte)) {
            replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
            dump_mmu(stdout, fprintf, env1);
#endif
            return;
        }
    }

    /* All entries are valid, try replacing unlocked entry */

    for (replace_used = 0; replace_used < 2; ++replace_used) {

        /* Used entries are not replaced on first pass */

        for (i = 0; i < 64; i++) {
            if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {

                replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
                DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
                            strmmu, (replace_used ? "used" : "unused"), i);
                dump_mmu(stdout, fprintf, env1);
#endif
                return;
            }
        }

        /* Now reset used bit and search for unused entries again */

        for (i = 0; i < 64; i++) {
            TTE_SET_UNUSED(tlb[i].tte);
        }
    }

#ifdef DEBUG_MMU
    DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
#endif
    /* error state? */
}

#endif

static inline target_ulong address_mask(CPUState *env1, target_ulong addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(env1)) {
        addr &= 0xffffffffULL;
    }
#endif
    return addr;
}

/* returns true if access using this ASI is to have address translated by MMU
   otherwise access is to raw physical address */
static inline int is_translating_asi(int asi)
{
#ifdef TARGET_SPARC64
    /* Ultrasparc IIi translating asi
       - note this list is defined by cpu implementation
    */
    switch (asi) {
    case 0x04 ... 0x11:
    case 0x16 ... 0x19:
    case 0x1E ... 0x1F:
    case 0x24 ... 0x2C:
    case 0x70 ... 0x73:
    case 0x78 ... 0x79:
    case 0x80 ... 0xFF:
        return 1;

    default:
        return 0;
    }
#else
    /* TODO: check sparc32 bits */
    return 0;
#endif
}

static inline target_ulong asi_address_mask(CPUState *env1,
                                            int asi, target_ulong addr)
{
    if (is_translating_asi(asi)) {
        return address_mask(env, addr);
    } else {
        return addr;
    }
}

void helper_check_align(target_ulong addr, uint32_t align)
{
    if (addr & align) {
#ifdef DEBUG_UNALIGNED
        printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
               "\n", addr, env->pc);
#endif
        helper_raise_exception(env, TT_UNALIGNED);
    }
}
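
/* Note (based on the call sites in this file, not a comment from the original
 * source): "align" is a low-order bit mask rather than an alignment value -
 * callers pass size - 1 for ordinary accesses and masks such as 3, 7, 0xf or
 * 0x3f for wider accesses, so any set bit in (addr & align) raises
 * TT_UNALIGNED.
 */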

#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
    defined(DEBUG_MXCC)
static void dump_mxcc(CPUState *env)
{
    printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccdata[0], env->mxccdata[1],
           env->mxccdata[2], env->mxccdata[3]);
    printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n"
           "          %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccregs[0], env->mxccregs[1],
           env->mxccregs[2], env->mxccregs[3],
           env->mxccregs[4], env->mxccregs[5],
           env->mxccregs[6], env->mxccregs[7]);
}
#endif

#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
    && defined(DEBUG_ASI)
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
                     uint64_t r1)
{
    switch (size) {
    case 1:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xff);
        break;
    case 2:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffff);
        break;
    case 4:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffffffff);
        break;
    case 8:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
                    addr, asi, r1);
        break;
    }
}
#endif

#ifndef TARGET_SPARC64
#ifndef CONFIG_USER_ONLY

/* Leon3 cache control */

static void leon3_cache_control_st(target_ulong addr, uint64_t val, int size)
{
    DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
                          addr, val, size);

    if (size != 4) {
        DPRINTF_CACHE_CONTROL("32bits only\n");
        return;
    }

    switch (addr) {
    case 0x00: /* Cache control */

        /* These values must always be read as zeros */
        val &= ~CACHE_CTRL_FD;
        val &= ~CACHE_CTRL_FI;
        val &= ~CACHE_CTRL_IB;
        val &= ~CACHE_CTRL_IP;
        val &= ~CACHE_CTRL_DP;

        env->cache_control = val;
        break;
    case 0x04: /* Instruction cache configuration */
    case 0x08: /* Data cache configuration */
        /* Read Only */
        break;
    default:
        DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
        break;
    }
}

static uint64_t leon3_cache_control_ld(target_ulong addr, int size)
{
    uint64_t ret = 0;

    if (size != 4) {
        DPRINTF_CACHE_CONTROL("32bits only\n");
        return 0;
    }

    switch (addr) {
    case 0x00: /* Cache control */
        ret = env->cache_control;
        break;

        /* Configuration registers are read-only and
           always return these predefined values */

    case 0x04: /* Instruction cache configuration */
        ret = 0x10220000;
        break;
    case 0x08: /* Data cache configuration */
        ret = 0x18220000;
        break;
    default:
        DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
        break;
    }
    DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
                          addr, ret, size);
    return ret;
}
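
/* Note (assumption, not a comment from the original source): these are the
 * LEON3 registers a guest reaches through alternate-space loads/stores with
 * ASI 0x02 (address 0x00 holding the cache control register), handled below
 * by the CPU_FEATURE_CACHE_CTRL paths of helper_ld_asi()/helper_st_asi().
 */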
439 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
441 uint64_t ret = 0;
442 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
443 uint32_t last_addr = addr;
444 #endif
446 helper_check_align(addr, size - 1);
447 switch (asi) {
448 case 2: /* SuperSparc MXCC registers and Leon3 cache control */
449 switch (addr) {
450 case 0x00: /* Leon3 Cache Control */
451 case 0x08: /* Leon3 Instruction Cache config */
        case 0x0C: /* Leon3 Data Cache config */
453 if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
454 ret = leon3_cache_control_ld(addr, size);
456 break;
457 case 0x01c00a00: /* MXCC control register */
458 if (size == 8) {
459 ret = env->mxccregs[3];
460 } else {
461 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
462 size);
464 break;
465 case 0x01c00a04: /* MXCC control register */
466 if (size == 4) {
467 ret = env->mxccregs[3];
468 } else {
469 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
470 size);
472 break;
473 case 0x01c00c00: /* Module reset register */
474 if (size == 8) {
475 ret = env->mxccregs[5];
476 /* should we do something here? */
477 } else {
478 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
479 size);
481 break;
482 case 0x01c00f00: /* MBus port address register */
483 if (size == 8) {
484 ret = env->mxccregs[7];
485 } else {
486 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
487 size);
489 break;
490 default:
491 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
492 size);
493 break;
495 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
496 "addr = %08x -> ret = %" PRIx64 ","
497 "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
498 #ifdef DEBUG_MXCC
499 dump_mxcc(env);
500 #endif
501 break;
502 case 3: /* MMU probe */
504 int mmulev;
506 mmulev = (addr >> 8) & 15;
507 if (mmulev > 4) {
508 ret = 0;
509 } else {
510 ret = mmu_probe(env, addr, mmulev);
512 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
513 addr, mmulev, ret);
515 break;
516 case 4: /* read MMU regs */
518 int reg = (addr >> 8) & 0x1f;
520 ret = env->mmuregs[reg];
521 if (reg == 3) { /* Fault status cleared on read */
522 env->mmuregs[3] = 0;
523 } else if (reg == 0x13) { /* Fault status read */
524 ret = env->mmuregs[3];
525 } else if (reg == 0x14) { /* Fault address read */
526 ret = env->mmuregs[4];
528 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
530 break;
531 case 5: /* Turbosparc ITLB Diagnostic */
532 case 6: /* Turbosparc DTLB Diagnostic */
533 case 7: /* Turbosparc IOTLB Diagnostic */
534 break;
535 case 9: /* Supervisor code access */
536 switch (size) {
537 case 1:
538 ret = ldub_code(addr);
539 break;
540 case 2:
541 ret = lduw_code(addr);
542 break;
543 default:
544 case 4:
545 ret = ldl_code(addr);
546 break;
547 case 8:
548 ret = ldq_code(addr);
549 break;
551 break;
552 case 0xa: /* User data access */
553 switch (size) {
554 case 1:
555 ret = ldub_user(addr);
556 break;
557 case 2:
558 ret = lduw_user(addr);
559 break;
560 default:
561 case 4:
562 ret = ldl_user(addr);
563 break;
564 case 8:
565 ret = ldq_user(addr);
566 break;
568 break;
569 case 0xb: /* Supervisor data access */
570 switch (size) {
571 case 1:
572 ret = ldub_kernel(addr);
573 break;
574 case 2:
575 ret = lduw_kernel(addr);
576 break;
577 default:
578 case 4:
579 ret = ldl_kernel(addr);
580 break;
581 case 8:
582 ret = ldq_kernel(addr);
583 break;
585 break;
586 case 0xc: /* I-cache tag */
587 case 0xd: /* I-cache data */
588 case 0xe: /* D-cache tag */
589 case 0xf: /* D-cache data */
590 break;
591 case 0x20: /* MMU passthrough */
592 switch (size) {
593 case 1:
594 ret = ldub_phys(addr);
595 break;
596 case 2:
597 ret = lduw_phys(addr);
598 break;
599 default:
600 case 4:
601 ret = ldl_phys(addr);
602 break;
603 case 8:
604 ret = ldq_phys(addr);
605 break;
607 break;
608 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
609 switch (size) {
610 case 1:
611 ret = ldub_phys((target_phys_addr_t)addr
612 | ((target_phys_addr_t)(asi & 0xf) << 32));
613 break;
614 case 2:
615 ret = lduw_phys((target_phys_addr_t)addr
616 | ((target_phys_addr_t)(asi & 0xf) << 32));
617 break;
618 default:
619 case 4:
620 ret = ldl_phys((target_phys_addr_t)addr
621 | ((target_phys_addr_t)(asi & 0xf) << 32));
622 break;
623 case 8:
624 ret = ldq_phys((target_phys_addr_t)addr
625 | ((target_phys_addr_t)(asi & 0xf) << 32));
626 break;
628 break;
629 case 0x30: /* Turbosparc secondary cache diagnostic */
630 case 0x31: /* Turbosparc RAM snoop */
631 case 0x32: /* Turbosparc page table descriptor diagnostic */
632 case 0x39: /* data cache diagnostic register */
633 ret = 0;
634 break;
635 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
637 int reg = (addr >> 8) & 3;
639 switch (reg) {
640 case 0: /* Breakpoint Value (Addr) */
641 ret = env->mmubpregs[reg];
642 break;
643 case 1: /* Breakpoint Mask */
644 ret = env->mmubpregs[reg];
645 break;
646 case 2: /* Breakpoint Control */
647 ret = env->mmubpregs[reg];
648 break;
649 case 3: /* Breakpoint Status */
650 ret = env->mmubpregs[reg];
651 env->mmubpregs[reg] = 0ULL;
652 break;
654 DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
655 ret);
657 break;
658 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
659 ret = env->mmubpctrv;
660 break;
661 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
662 ret = env->mmubpctrc;
663 break;
664 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
665 ret = env->mmubpctrs;
666 break;
667 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
668 ret = env->mmubpaction;
669 break;
670 case 8: /* User code access, XXX */
671 default:
672 do_unassigned_access(addr, 0, 0, asi, size);
673 ret = 0;
674 break;
676 if (sign) {
677 switch (size) {
678 case 1:
679 ret = (int8_t) ret;
680 break;
681 case 2:
682 ret = (int16_t) ret;
683 break;
684 case 4:
685 ret = (int32_t) ret;
686 break;
687 default:
688 break;
691 #ifdef DEBUG_ASI
692 dump_asi("read ", last_addr, asi, size, ret);
693 #endif
694 return ret;
697 void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
699 helper_check_align(addr, size - 1);
700 switch (asi) {
701 case 2: /* SuperSparc MXCC registers and Leon3 cache control */
702 switch (addr) {
703 case 0x00: /* Leon3 Cache Control */
704 case 0x08: /* Leon3 Instruction Cache config */
        case 0x0C: /* Leon3 Data Cache config */
706 if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
707 leon3_cache_control_st(addr, val, size);
709 break;
711 case 0x01c00000: /* MXCC stream data register 0 */
712 if (size == 8) {
713 env->mxccdata[0] = val;
714 } else {
715 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
716 size);
718 break;
719 case 0x01c00008: /* MXCC stream data register 1 */
720 if (size == 8) {
721 env->mxccdata[1] = val;
722 } else {
723 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
724 size);
726 break;
727 case 0x01c00010: /* MXCC stream data register 2 */
728 if (size == 8) {
729 env->mxccdata[2] = val;
730 } else {
731 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
732 size);
734 break;
735 case 0x01c00018: /* MXCC stream data register 3 */
736 if (size == 8) {
737 env->mxccdata[3] = val;
738 } else {
739 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
740 size);
742 break;
743 case 0x01c00100: /* MXCC stream source */
744 if (size == 8) {
745 env->mxccregs[0] = val;
746 } else {
747 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
748 size);
            env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        0);
            env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        8);
            env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        16);
            env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        24);
758 break;
759 case 0x01c00200: /* MXCC stream destination */
760 if (size == 8) {
761 env->mxccregs[1] = val;
762 } else {
763 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
764 size);
766 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0,
767 env->mxccdata[0]);
768 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8,
769 env->mxccdata[1]);
770 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
771 env->mxccdata[2]);
772 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
773 env->mxccdata[3]);
774 break;
775 case 0x01c00a00: /* MXCC control register */
776 if (size == 8) {
777 env->mxccregs[3] = val;
778 } else {
779 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
780 size);
782 break;
783 case 0x01c00a04: /* MXCC control register */
784 if (size == 4) {
785 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
786 | val;
787 } else {
788 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
789 size);
791 break;
792 case 0x01c00e00: /* MXCC error register */
793 /* writing a 1 bit clears the error */
794 if (size == 8) {
795 env->mxccregs[6] &= ~val;
796 } else {
797 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
798 size);
800 break;
801 case 0x01c00f00: /* MBus port address register */
802 if (size == 8) {
803 env->mxccregs[7] = val;
804 } else {
805 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
806 size);
808 break;
809 default:
810 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
811 size);
812 break;
814 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
815 asi, size, addr, val);
816 #ifdef DEBUG_MXCC
817 dump_mxcc(env);
818 #endif
819 break;
820 case 3: /* MMU flush */
822 int mmulev;
824 mmulev = (addr >> 8) & 15;
825 DPRINTF_MMU("mmu flush level %d\n", mmulev);
826 switch (mmulev) {
827 case 0: /* flush page */
828 tlb_flush_page(env, addr & 0xfffff000);
829 break;
830 case 1: /* flush segment (256k) */
831 case 2: /* flush region (16M) */
832 case 3: /* flush context (4G) */
833 case 4: /* flush entire */
834 tlb_flush(env, 1);
835 break;
836 default:
837 break;
839 #ifdef DEBUG_MMU
840 dump_mmu(stdout, fprintf, env);
841 #endif
843 break;
844 case 4: /* write MMU regs */
846 int reg = (addr >> 8) & 0x1f;
847 uint32_t oldreg;
849 oldreg = env->mmuregs[reg];
850 switch (reg) {
851 case 0: /* Control Register */
852 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
853 (val & 0x00ffffff);
854 /* Mappings generated during no-fault mode or MMU
855 disabled mode are invalid in normal mode */
856 if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
857 (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm))) {
858 tlb_flush(env, 1);
860 break;
861 case 1: /* Context Table Pointer Register */
862 env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
863 break;
864 case 2: /* Context Register */
865 env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
866 if (oldreg != env->mmuregs[reg]) {
867 /* we flush when the MMU context changes because
868 QEMU has no MMU context support */
869 tlb_flush(env, 1);
871 break;
872 case 3: /* Synchronous Fault Status Register with Clear */
873 case 4: /* Synchronous Fault Address Register */
874 break;
875 case 0x10: /* TLB Replacement Control Register */
876 env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
877 break;
878 case 0x13: /* Synchronous Fault Status Register with Read
879 and Clear */
880 env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
881 break;
882 case 0x14: /* Synchronous Fault Address Register */
883 env->mmuregs[4] = val;
884 break;
885 default:
886 env->mmuregs[reg] = val;
887 break;
889 if (oldreg != env->mmuregs[reg]) {
890 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
891 reg, oldreg, env->mmuregs[reg]);
893 #ifdef DEBUG_MMU
894 dump_mmu(stdout, fprintf, env);
895 #endif
897 break;
898 case 5: /* Turbosparc ITLB Diagnostic */
899 case 6: /* Turbosparc DTLB Diagnostic */
900 case 7: /* Turbosparc IOTLB Diagnostic */
901 break;
902 case 0xa: /* User data access */
903 switch (size) {
904 case 1:
905 stb_user(addr, val);
906 break;
907 case 2:
908 stw_user(addr, val);
909 break;
910 default:
911 case 4:
912 stl_user(addr, val);
913 break;
914 case 8:
915 stq_user(addr, val);
916 break;
918 break;
919 case 0xb: /* Supervisor data access */
920 switch (size) {
921 case 1:
922 stb_kernel(addr, val);
923 break;
924 case 2:
925 stw_kernel(addr, val);
926 break;
927 default:
928 case 4:
929 stl_kernel(addr, val);
930 break;
931 case 8:
932 stq_kernel(addr, val);
933 break;
935 break;
936 case 0xc: /* I-cache tag */
937 case 0xd: /* I-cache data */
938 case 0xe: /* D-cache tag */
939 case 0xf: /* D-cache data */
940 case 0x10: /* I/D-cache flush page */
941 case 0x11: /* I/D-cache flush segment */
942 case 0x12: /* I/D-cache flush region */
943 case 0x13: /* I/D-cache flush context */
944 case 0x14: /* I/D-cache flush user */
945 break;
946 case 0x17: /* Block copy, sta access */
948 /* val = src
949 addr = dst
950 copy 32 bytes */
951 unsigned int i;
952 uint32_t src = val & ~3, dst = addr & ~3, temp;
954 for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
955 temp = ldl_kernel(src);
956 stl_kernel(dst, temp);
959 break;
960 case 0x1f: /* Block fill, stda access */
962 /* addr = dst
963 fill 32 bytes with val */
964 unsigned int i;
            uint32_t dst = addr & ~7; /* align destination to the 8-byte stores below */
967 for (i = 0; i < 32; i += 8, dst += 8) {
968 stq_kernel(dst, val);
971 break;
972 case 0x20: /* MMU passthrough */
974 switch (size) {
975 case 1:
976 stb_phys(addr, val);
977 break;
978 case 2:
979 stw_phys(addr, val);
980 break;
981 case 4:
982 default:
983 stl_phys(addr, val);
984 break;
985 case 8:
986 stq_phys(addr, val);
987 break;
990 break;
991 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
993 switch (size) {
994 case 1:
995 stb_phys((target_phys_addr_t)addr
996 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
997 break;
998 case 2:
999 stw_phys((target_phys_addr_t)addr
1000 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1001 break;
1002 case 4:
1003 default:
1004 stl_phys((target_phys_addr_t)addr
1005 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1006 break;
1007 case 8:
1008 stq_phys((target_phys_addr_t)addr
1009 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1010 break;
1013 break;
1014 case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */
1015 case 0x31: /* store buffer data, Ross RT620 I-cache flush or
1016 Turbosparc snoop RAM */
1017 case 0x32: /* store buffer control or Turbosparc page table
1018 descriptor diagnostic */
1019 case 0x36: /* I-cache flash clear */
1020 case 0x37: /* D-cache flash clear */
1021 break;
1022 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
1024 int reg = (addr >> 8) & 3;
1026 switch (reg) {
1027 case 0: /* Breakpoint Value (Addr) */
1028 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1029 break;
1030 case 1: /* Breakpoint Mask */
1031 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1032 break;
1033 case 2: /* Breakpoint Control */
1034 env->mmubpregs[reg] = (val & 0x7fULL);
1035 break;
1036 case 3: /* Breakpoint Status */
1037 env->mmubpregs[reg] = (val & 0xfULL);
1038 break;
            DPRINTF_MMU("write breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
                        env->mmubpregs[reg]);
1043 break;
1044 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
1045 env->mmubpctrv = val & 0xffffffff;
1046 break;
1047 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
1048 env->mmubpctrc = val & 0x3;
1049 break;
1050 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
1051 env->mmubpctrs = val & 0x3;
1052 break;
1053 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
1054 env->mmubpaction = val & 0x1fff;
1055 break;
1056 case 8: /* User code access, XXX */
1057 case 9: /* Supervisor code access, XXX */
1058 default:
1059 do_unassigned_access(addr, 1, 0, asi, size);
1060 break;
1062 #ifdef DEBUG_ASI
1063 dump_asi("write", addr, asi, size, val);
1064 #endif
1067 #endif /* CONFIG_USER_ONLY */
1068 #else /* TARGET_SPARC64 */
1070 #ifdef CONFIG_USER_ONLY
1071 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1073 uint64_t ret = 0;
1074 #if defined(DEBUG_ASI)
1075 target_ulong last_addr = addr;
1076 #endif
1078 if (asi < 0x80) {
1079 helper_raise_exception(env, TT_PRIV_ACT);
1082 helper_check_align(addr, size - 1);
1083 addr = asi_address_mask(env, asi, addr);
1085 switch (asi) {
1086 case 0x82: /* Primary no-fault */
1087 case 0x8a: /* Primary no-fault LE */
1088 if (page_check_range(addr, size, PAGE_READ) == -1) {
1089 #ifdef DEBUG_ASI
1090 dump_asi("read ", last_addr, asi, size, ret);
1091 #endif
1092 return 0;
1094 /* Fall through */
1095 case 0x80: /* Primary */
1096 case 0x88: /* Primary LE */
1098 switch (size) {
1099 case 1:
1100 ret = ldub_raw(addr);
1101 break;
1102 case 2:
1103 ret = lduw_raw(addr);
1104 break;
1105 case 4:
1106 ret = ldl_raw(addr);
1107 break;
1108 default:
1109 case 8:
1110 ret = ldq_raw(addr);
1111 break;
1114 break;
1115 case 0x83: /* Secondary no-fault */
1116 case 0x8b: /* Secondary no-fault LE */
1117 if (page_check_range(addr, size, PAGE_READ) == -1) {
1118 #ifdef DEBUG_ASI
1119 dump_asi("read ", last_addr, asi, size, ret);
1120 #endif
1121 return 0;
1123 /* Fall through */
1124 case 0x81: /* Secondary */
1125 case 0x89: /* Secondary LE */
1126 /* XXX */
1127 break;
1128 default:
1129 break;
1132 /* Convert from little endian */
1133 switch (asi) {
1134 case 0x88: /* Primary LE */
1135 case 0x89: /* Secondary LE */
1136 case 0x8a: /* Primary no-fault LE */
1137 case 0x8b: /* Secondary no-fault LE */
1138 switch (size) {
1139 case 2:
1140 ret = bswap16(ret);
1141 break;
1142 case 4:
1143 ret = bswap32(ret);
1144 break;
1145 case 8:
1146 ret = bswap64(ret);
1147 break;
1148 default:
1149 break;
1151 default:
1152 break;
1155 /* Convert to signed number */
1156 if (sign) {
1157 switch (size) {
1158 case 1:
1159 ret = (int8_t) ret;
1160 break;
1161 case 2:
1162 ret = (int16_t) ret;
1163 break;
1164 case 4:
1165 ret = (int32_t) ret;
1166 break;
1167 default:
1168 break;
1171 #ifdef DEBUG_ASI
1172 dump_asi("read ", last_addr, asi, size, ret);
1173 #endif
1174 return ret;
1177 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1179 #ifdef DEBUG_ASI
1180 dump_asi("write", addr, asi, size, val);
1181 #endif
1182 if (asi < 0x80) {
1183 helper_raise_exception(env, TT_PRIV_ACT);
1186 helper_check_align(addr, size - 1);
1187 addr = asi_address_mask(env, asi, addr);
1189 /* Convert to little endian */
1190 switch (asi) {
1191 case 0x88: /* Primary LE */
1192 case 0x89: /* Secondary LE */
1193 switch (size) {
1194 case 2:
1195 val = bswap16(val);
1196 break;
1197 case 4:
1198 val = bswap32(val);
1199 break;
1200 case 8:
1201 val = bswap64(val);
1202 break;
1203 default:
1204 break;
1206 default:
1207 break;
1210 switch (asi) {
1211 case 0x80: /* Primary */
1212 case 0x88: /* Primary LE */
1214 switch (size) {
1215 case 1:
1216 stb_raw(addr, val);
1217 break;
1218 case 2:
1219 stw_raw(addr, val);
1220 break;
1221 case 4:
1222 stl_raw(addr, val);
1223 break;
1224 case 8:
1225 default:
1226 stq_raw(addr, val);
1227 break;
1230 break;
1231 case 0x81: /* Secondary */
1232 case 0x89: /* Secondary LE */
1233 /* XXX */
1234 return;
1236 case 0x82: /* Primary no-fault, RO */
1237 case 0x83: /* Secondary no-fault, RO */
1238 case 0x8a: /* Primary no-fault LE, RO */
1239 case 0x8b: /* Secondary no-fault LE, RO */
1240 default:
1241 do_unassigned_access(addr, 1, 0, 1, size);
1242 return;
1246 #else /* CONFIG_USER_ONLY */
1248 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1250 uint64_t ret = 0;
1251 #if defined(DEBUG_ASI)
1252 target_ulong last_addr = addr;
1253 #endif
1255 asi &= 0xff;
1257 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1258 || (cpu_has_hypervisor(env)
1259 && asi >= 0x30 && asi < 0x80
1260 && !(env->hpstate & HS_PRIV))) {
1261 helper_raise_exception(env, TT_PRIV_ACT);
1264 helper_check_align(addr, size - 1);
1265 addr = asi_address_mask(env, asi, addr);
1267 /* process nonfaulting loads first */
1268 if ((asi & 0xf6) == 0x82) {
1269 int mmu_idx;
1271 /* secondary space access has lowest asi bit equal to 1 */
1272 if (env->pstate & PS_PRIV) {
1273 mmu_idx = (asi & 1) ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX;
1274 } else {
1275 mmu_idx = (asi & 1) ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX;
1278 if (cpu_get_phys_page_nofault(env, addr, mmu_idx) == -1ULL) {
1279 #ifdef DEBUG_ASI
1280 dump_asi("read ", last_addr, asi, size, ret);
1281 #endif
1282 /* env->exception_index is set in get_physical_address_data(). */
1283 helper_raise_exception(env, env->exception_index);
1286 /* convert nonfaulting load ASIs to normal load ASIs */
1287 asi &= ~0x02;
1290 switch (asi) {
1291 case 0x10: /* As if user primary */
1292 case 0x11: /* As if user secondary */
1293 case 0x18: /* As if user primary LE */
1294 case 0x19: /* As if user secondary LE */
1295 case 0x80: /* Primary */
1296 case 0x81: /* Secondary */
1297 case 0x88: /* Primary LE */
1298 case 0x89: /* Secondary LE */
1299 case 0xe2: /* UA2007 Primary block init */
1300 case 0xe3: /* UA2007 Secondary block init */
1301 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1302 if (cpu_hypervisor_mode(env)) {
1303 switch (size) {
1304 case 1:
1305 ret = ldub_hypv(addr);
1306 break;
1307 case 2:
1308 ret = lduw_hypv(addr);
1309 break;
1310 case 4:
1311 ret = ldl_hypv(addr);
1312 break;
1313 default:
1314 case 8:
1315 ret = ldq_hypv(addr);
1316 break;
1318 } else {
1319 /* secondary space access has lowest asi bit equal to 1 */
1320 if (asi & 1) {
1321 switch (size) {
1322 case 1:
1323 ret = ldub_kernel_secondary(addr);
1324 break;
1325 case 2:
1326 ret = lduw_kernel_secondary(addr);
1327 break;
1328 case 4:
1329 ret = ldl_kernel_secondary(addr);
1330 break;
1331 default:
1332 case 8:
1333 ret = ldq_kernel_secondary(addr);
1334 break;
1336 } else {
1337 switch (size) {
1338 case 1:
1339 ret = ldub_kernel(addr);
1340 break;
1341 case 2:
1342 ret = lduw_kernel(addr);
1343 break;
1344 case 4:
1345 ret = ldl_kernel(addr);
1346 break;
1347 default:
1348 case 8:
1349 ret = ldq_kernel(addr);
1350 break;
1354 } else {
1355 /* secondary space access has lowest asi bit equal to 1 */
1356 if (asi & 1) {
1357 switch (size) {
1358 case 1:
1359 ret = ldub_user_secondary(addr);
1360 break;
1361 case 2:
1362 ret = lduw_user_secondary(addr);
1363 break;
1364 case 4:
1365 ret = ldl_user_secondary(addr);
1366 break;
1367 default:
1368 case 8:
1369 ret = ldq_user_secondary(addr);
1370 break;
1372 } else {
1373 switch (size) {
1374 case 1:
1375 ret = ldub_user(addr);
1376 break;
1377 case 2:
1378 ret = lduw_user(addr);
1379 break;
1380 case 4:
1381 ret = ldl_user(addr);
1382 break;
1383 default:
1384 case 8:
1385 ret = ldq_user(addr);
1386 break;
1390 break;
1391 case 0x14: /* Bypass */
1392 case 0x15: /* Bypass, non-cacheable */
1393 case 0x1c: /* Bypass LE */
1394 case 0x1d: /* Bypass, non-cacheable LE */
1396 switch (size) {
1397 case 1:
1398 ret = ldub_phys(addr);
1399 break;
1400 case 2:
1401 ret = lduw_phys(addr);
1402 break;
1403 case 4:
1404 ret = ldl_phys(addr);
1405 break;
1406 default:
1407 case 8:
1408 ret = ldq_phys(addr);
1409 break;
1411 break;
1413 case 0x24: /* Nucleus quad LDD 128 bit atomic */
1414 case 0x2c: /* Nucleus quad LDD 128 bit atomic LE
1415 Only ldda allowed */
1416 helper_raise_exception(env, TT_ILL_INSN);
1417 return 0;
1418 case 0x04: /* Nucleus */
1419 case 0x0c: /* Nucleus Little Endian (LE) */
1421 switch (size) {
1422 case 1:
1423 ret = ldub_nucleus(addr);
1424 break;
1425 case 2:
1426 ret = lduw_nucleus(addr);
1427 break;
1428 case 4:
1429 ret = ldl_nucleus(addr);
1430 break;
1431 default:
1432 case 8:
1433 ret = ldq_nucleus(addr);
1434 break;
1436 break;
1438 case 0x4a: /* UPA config */
1439 /* XXX */
1440 break;
1441 case 0x45: /* LSU */
1442 ret = env->lsu;
1443 break;
1444 case 0x50: /* I-MMU regs */
1446 int reg = (addr >> 3) & 0xf;
1448 if (reg == 0) {
1449 /* I-TSB Tag Target register */
1450 ret = ultrasparc_tag_target(env->immu.tag_access);
1451 } else {
1452 ret = env->immuregs[reg];
1455 break;
1457 case 0x51: /* I-MMU 8k TSB pointer */
1459 /* env->immuregs[5] holds I-MMU TSB register value
1460 env->immuregs[6] holds I-MMU Tag Access register value */
1461 ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
1462 8*1024);
1463 break;
1465 case 0x52: /* I-MMU 64k TSB pointer */
1467 /* env->immuregs[5] holds I-MMU TSB register value
1468 env->immuregs[6] holds I-MMU Tag Access register value */
1469 ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
1470 64*1024);
1471 break;
1473 case 0x55: /* I-MMU data access */
1475 int reg = (addr >> 3) & 0x3f;
1477 ret = env->itlb[reg].tte;
1478 break;
1480 case 0x56: /* I-MMU tag read */
1482 int reg = (addr >> 3) & 0x3f;
1484 ret = env->itlb[reg].tag;
1485 break;
1487 case 0x58: /* D-MMU regs */
1489 int reg = (addr >> 3) & 0xf;
1491 if (reg == 0) {
1492 /* D-TSB Tag Target register */
1493 ret = ultrasparc_tag_target(env->dmmu.tag_access);
1494 } else {
1495 ret = env->dmmuregs[reg];
1497 break;
1499 case 0x59: /* D-MMU 8k TSB pointer */
1501 /* env->dmmuregs[5] holds D-MMU TSB register value
1502 env->dmmuregs[6] holds D-MMU Tag Access register value */
1503 ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
1504 8*1024);
1505 break;
1507 case 0x5a: /* D-MMU 64k TSB pointer */
1509 /* env->dmmuregs[5] holds D-MMU TSB register value
1510 env->dmmuregs[6] holds D-MMU Tag Access register value */
1511 ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
1512 64*1024);
1513 break;
1515 case 0x5d: /* D-MMU data access */
1517 int reg = (addr >> 3) & 0x3f;
1519 ret = env->dtlb[reg].tte;
1520 break;
1522 case 0x5e: /* D-MMU tag read */
1524 int reg = (addr >> 3) & 0x3f;
1526 ret = env->dtlb[reg].tag;
1527 break;
1529 case 0x46: /* D-cache data */
1530 case 0x47: /* D-cache tag access */
1531 case 0x4b: /* E-cache error enable */
1532 case 0x4c: /* E-cache asynchronous fault status */
1533 case 0x4d: /* E-cache asynchronous fault address */
1534 case 0x4e: /* E-cache tag data */
1535 case 0x66: /* I-cache instruction access */
1536 case 0x67: /* I-cache tag access */
1537 case 0x6e: /* I-cache predecode */
1538 case 0x6f: /* I-cache LRU etc. */
1539 case 0x76: /* E-cache tag */
1540 case 0x7e: /* E-cache tag */
1541 break;
1542 case 0x5b: /* D-MMU data pointer */
1543 case 0x48: /* Interrupt dispatch, RO */
1544 case 0x49: /* Interrupt data receive */
1545 case 0x7f: /* Incoming interrupt vector, RO */
1546 /* XXX */
1547 break;
1548 case 0x54: /* I-MMU data in, WO */
1549 case 0x57: /* I-MMU demap, WO */
1550 case 0x5c: /* D-MMU data in, WO */
1551 case 0x5f: /* D-MMU demap, WO */
1552 case 0x77: /* Interrupt vector, WO */
1553 default:
1554 do_unassigned_access(addr, 0, 0, 1, size);
1555 ret = 0;
1556 break;
1559 /* Convert from little endian */
1560 switch (asi) {
1561 case 0x0c: /* Nucleus Little Endian (LE) */
1562 case 0x18: /* As if user primary LE */
1563 case 0x19: /* As if user secondary LE */
1564 case 0x1c: /* Bypass LE */
1565 case 0x1d: /* Bypass, non-cacheable LE */
1566 case 0x88: /* Primary LE */
1567 case 0x89: /* Secondary LE */
1568 switch(size) {
1569 case 2:
1570 ret = bswap16(ret);
1571 break;
1572 case 4:
1573 ret = bswap32(ret);
1574 break;
1575 case 8:
1576 ret = bswap64(ret);
1577 break;
1578 default:
1579 break;
1581 default:
1582 break;
1585 /* Convert to signed number */
1586 if (sign) {
1587 switch (size) {
1588 case 1:
1589 ret = (int8_t) ret;
1590 break;
1591 case 2:
1592 ret = (int16_t) ret;
1593 break;
1594 case 4:
1595 ret = (int32_t) ret;
1596 break;
1597 default:
1598 break;
1601 #ifdef DEBUG_ASI
1602 dump_asi("read ", last_addr, asi, size, ret);
1603 #endif
1604 return ret;
1607 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1609 #ifdef DEBUG_ASI
1610 dump_asi("write", addr, asi, size, val);
1611 #endif
1613 asi &= 0xff;
1615 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1616 || (cpu_has_hypervisor(env)
1617 && asi >= 0x30 && asi < 0x80
1618 && !(env->hpstate & HS_PRIV))) {
1619 helper_raise_exception(env, TT_PRIV_ACT);
1622 helper_check_align(addr, size - 1);
1623 addr = asi_address_mask(env, asi, addr);
1625 /* Convert to little endian */
1626 switch (asi) {
1627 case 0x0c: /* Nucleus Little Endian (LE) */
1628 case 0x18: /* As if user primary LE */
1629 case 0x19: /* As if user secondary LE */
1630 case 0x1c: /* Bypass LE */
1631 case 0x1d: /* Bypass, non-cacheable LE */
1632 case 0x88: /* Primary LE */
1633 case 0x89: /* Secondary LE */
1634 switch (size) {
1635 case 2:
1636 val = bswap16(val);
1637 break;
1638 case 4:
1639 val = bswap32(val);
1640 break;
1641 case 8:
1642 val = bswap64(val);
1643 break;
1644 default:
1645 break;
1647 default:
1648 break;
1651 switch (asi) {
1652 case 0x10: /* As if user primary */
1653 case 0x11: /* As if user secondary */
1654 case 0x18: /* As if user primary LE */
1655 case 0x19: /* As if user secondary LE */
1656 case 0x80: /* Primary */
1657 case 0x81: /* Secondary */
1658 case 0x88: /* Primary LE */
1659 case 0x89: /* Secondary LE */
1660 case 0xe2: /* UA2007 Primary block init */
1661 case 0xe3: /* UA2007 Secondary block init */
1662 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1663 if (cpu_hypervisor_mode(env)) {
1664 switch (size) {
1665 case 1:
1666 stb_hypv(addr, val);
1667 break;
1668 case 2:
1669 stw_hypv(addr, val);
1670 break;
1671 case 4:
1672 stl_hypv(addr, val);
1673 break;
1674 case 8:
1675 default:
1676 stq_hypv(addr, val);
1677 break;
1679 } else {
1680 /* secondary space access has lowest asi bit equal to 1 */
1681 if (asi & 1) {
1682 switch (size) {
1683 case 1:
1684 stb_kernel_secondary(addr, val);
1685 break;
1686 case 2:
1687 stw_kernel_secondary(addr, val);
1688 break;
1689 case 4:
1690 stl_kernel_secondary(addr, val);
1691 break;
1692 case 8:
1693 default:
1694 stq_kernel_secondary(addr, val);
1695 break;
1697 } else {
1698 switch (size) {
1699 case 1:
1700 stb_kernel(addr, val);
1701 break;
1702 case 2:
1703 stw_kernel(addr, val);
1704 break;
1705 case 4:
1706 stl_kernel(addr, val);
1707 break;
1708 case 8:
1709 default:
1710 stq_kernel(addr, val);
1711 break;
1715 } else {
1716 /* secondary space access has lowest asi bit equal to 1 */
1717 if (asi & 1) {
1718 switch (size) {
1719 case 1:
1720 stb_user_secondary(addr, val);
1721 break;
1722 case 2:
1723 stw_user_secondary(addr, val);
1724 break;
1725 case 4:
1726 stl_user_secondary(addr, val);
1727 break;
1728 case 8:
1729 default:
1730 stq_user_secondary(addr, val);
1731 break;
1733 } else {
1734 switch (size) {
1735 case 1:
1736 stb_user(addr, val);
1737 break;
1738 case 2:
1739 stw_user(addr, val);
1740 break;
1741 case 4:
1742 stl_user(addr, val);
1743 break;
1744 case 8:
1745 default:
1746 stq_user(addr, val);
1747 break;
1751 break;
1752 case 0x14: /* Bypass */
1753 case 0x15: /* Bypass, non-cacheable */
1754 case 0x1c: /* Bypass LE */
1755 case 0x1d: /* Bypass, non-cacheable LE */
1757 switch (size) {
1758 case 1:
1759 stb_phys(addr, val);
1760 break;
1761 case 2:
1762 stw_phys(addr, val);
1763 break;
1764 case 4:
1765 stl_phys(addr, val);
1766 break;
1767 case 8:
1768 default:
1769 stq_phys(addr, val);
1770 break;
1773 return;
1774 case 0x24: /* Nucleus quad LDD 128 bit atomic */
1775 case 0x2c: /* Nucleus quad LDD 128 bit atomic LE
1776 Only ldda allowed */
1777 helper_raise_exception(env, TT_ILL_INSN);
1778 return;
1779 case 0x04: /* Nucleus */
1780 case 0x0c: /* Nucleus Little Endian (LE) */
1782 switch (size) {
1783 case 1:
1784 stb_nucleus(addr, val);
1785 break;
1786 case 2:
1787 stw_nucleus(addr, val);
1788 break;
1789 case 4:
1790 stl_nucleus(addr, val);
1791 break;
1792 default:
1793 case 8:
1794 stq_nucleus(addr, val);
1795 break;
1797 break;
1800 case 0x4a: /* UPA config */
1801 /* XXX */
1802 return;
1803 case 0x45: /* LSU */
1805 uint64_t oldreg;
1807 oldreg = env->lsu;
1808 env->lsu = val & (DMMU_E | IMMU_E);
1809 /* Mappings generated during D/I MMU disabled mode are
1810 invalid in normal mode */
1811 if (oldreg != env->lsu) {
1812 DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
1813 oldreg, env->lsu);
1814 #ifdef DEBUG_MMU
                dump_mmu(stdout, fprintf, env);
1816 #endif
1817 tlb_flush(env, 1);
1819 return;
1821 case 0x50: /* I-MMU regs */
1823 int reg = (addr >> 3) & 0xf;
1824 uint64_t oldreg;
1826 oldreg = env->immuregs[reg];
1827 switch (reg) {
1828 case 0: /* RO */
1829 return;
1830 case 1: /* Not in I-MMU */
1831 case 2:
1832 return;
1833 case 3: /* SFSR */
1834 if ((val & 1) == 0) {
1835 val = 0; /* Clear SFSR */
1837 env->immu.sfsr = val;
1838 break;
1839 case 4: /* RO */
1840 return;
1841 case 5: /* TSB access */
1842 DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
1843 PRIx64 "\n", env->immu.tsb, val);
1844 env->immu.tsb = val;
1845 break;
1846 case 6: /* Tag access */
1847 env->immu.tag_access = val;
1848 break;
1849 case 7:
1850 case 8:
1851 return;
1852 default:
1853 break;
1856 if (oldreg != env->immuregs[reg]) {
1857 DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
1858 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
1860 #ifdef DEBUG_MMU
1861 dump_mmu(stdout, fprintf, env);
1862 #endif
1863 return;
1865 case 0x54: /* I-MMU data in */
1866 replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
1867 return;
1868 case 0x55: /* I-MMU data access */
1870 /* TODO: auto demap */
1872 unsigned int i = (addr >> 3) & 0x3f;
1874 replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);
1876 #ifdef DEBUG_MMU
1877 DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
1878 dump_mmu(stdout, fprintf, env);
1879 #endif
1880 return;
1882 case 0x57: /* I-MMU demap */
1883 demap_tlb(env->itlb, addr, "immu", env);
1884 return;
1885 case 0x58: /* D-MMU regs */
1887 int reg = (addr >> 3) & 0xf;
1888 uint64_t oldreg;
1890 oldreg = env->dmmuregs[reg];
1891 switch (reg) {
1892 case 0: /* RO */
1893 case 4:
1894 return;
1895 case 3: /* SFSR */
1896 if ((val & 1) == 0) {
1897 val = 0; /* Clear SFSR, Fault address */
1898 env->dmmu.sfar = 0;
1900 env->dmmu.sfsr = val;
1901 break;
1902 case 1: /* Primary context */
1903 env->dmmu.mmu_primary_context = val;
1904 /* can be optimized to only flush MMU_USER_IDX
1905 and MMU_KERNEL_IDX entries */
1906 tlb_flush(env, 1);
1907 break;
1908 case 2: /* Secondary context */
1909 env->dmmu.mmu_secondary_context = val;
1910 /* can be optimized to only flush MMU_USER_SECONDARY_IDX
1911 and MMU_KERNEL_SECONDARY_IDX entries */
1912 tlb_flush(env, 1);
1913 break;
1914 case 5: /* TSB access */
1915 DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
1916 PRIx64 "\n", env->dmmu.tsb, val);
1917 env->dmmu.tsb = val;
1918 break;
1919 case 6: /* Tag access */
1920 env->dmmu.tag_access = val;
1921 break;
1922 case 7: /* Virtual Watchpoint */
1923 case 8: /* Physical Watchpoint */
1924 default:
1925 env->dmmuregs[reg] = val;
1926 break;
1929 if (oldreg != env->dmmuregs[reg]) {
1930 DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
1931 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
1933 #ifdef DEBUG_MMU
1934 dump_mmu(stdout, fprintf, env);
1935 #endif
1936 return;
1938 case 0x5c: /* D-MMU data in */
1939 replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
1940 return;
1941 case 0x5d: /* D-MMU data access */
1943 unsigned int i = (addr >> 3) & 0x3f;
1945 replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);
1947 #ifdef DEBUG_MMU
1948 DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
1949 dump_mmu(stdout, fprintf, env);
1950 #endif
1951 return;
1953 case 0x5f: /* D-MMU demap */
1954 demap_tlb(env->dtlb, addr, "dmmu", env);
1955 return;
1956 case 0x49: /* Interrupt data receive */
1957 /* XXX */
1958 return;
1959 case 0x46: /* D-cache data */
1960 case 0x47: /* D-cache tag access */
1961 case 0x4b: /* E-cache error enable */
1962 case 0x4c: /* E-cache asynchronous fault status */
1963 case 0x4d: /* E-cache asynchronous fault address */
1964 case 0x4e: /* E-cache tag data */
1965 case 0x66: /* I-cache instruction access */
1966 case 0x67: /* I-cache tag access */
1967 case 0x6e: /* I-cache predecode */
1968 case 0x6f: /* I-cache LRU etc. */
1969 case 0x76: /* E-cache tag */
1970 case 0x7e: /* E-cache tag */
1971 return;
1972 case 0x51: /* I-MMU 8k TSB pointer, RO */
1973 case 0x52: /* I-MMU 64k TSB pointer, RO */
1974 case 0x56: /* I-MMU tag read, RO */
1975 case 0x59: /* D-MMU 8k TSB pointer, RO */
1976 case 0x5a: /* D-MMU 64k TSB pointer, RO */
1977 case 0x5b: /* D-MMU data pointer, RO */
1978 case 0x5e: /* D-MMU tag read, RO */
1979 case 0x48: /* Interrupt dispatch, RO */
1980 case 0x7f: /* Incoming interrupt vector, RO */
1981 case 0x82: /* Primary no-fault, RO */
1982 case 0x83: /* Secondary no-fault, RO */
1983 case 0x8a: /* Primary no-fault LE, RO */
1984 case 0x8b: /* Secondary no-fault LE, RO */
1985 default:
1986 do_unassigned_access(addr, 1, 0, 1, size);
1987 return;
1990 #endif /* CONFIG_USER_ONLY */
1992 void helper_ldda_asi(target_ulong addr, int asi, int rd)
1994 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1995 || (cpu_has_hypervisor(env)
1996 && asi >= 0x30 && asi < 0x80
1997 && !(env->hpstate & HS_PRIV))) {
1998 helper_raise_exception(env, TT_PRIV_ACT);
2001 addr = asi_address_mask(env, asi, addr);
2003 switch (asi) {
2004 #if !defined(CONFIG_USER_ONLY)
2005 case 0x24: /* Nucleus quad LDD 128 bit atomic */
2006 case 0x2c: /* Nucleus quad LDD 128 bit atomic LE */
2007 helper_check_align(addr, 0xf);
2008 if (rd == 0) {
2009 env->gregs[1] = ldq_nucleus(addr + 8);
2010 if (asi == 0x2c) {
2011 bswap64s(&env->gregs[1]);
2013 } else if (rd < 8) {
2014 env->gregs[rd] = ldq_nucleus(addr);
2015 env->gregs[rd + 1] = ldq_nucleus(addr + 8);
2016 if (asi == 0x2c) {
2017 bswap64s(&env->gregs[rd]);
2018 bswap64s(&env->gregs[rd + 1]);
2020 } else {
2021 env->regwptr[rd] = ldq_nucleus(addr);
2022 env->regwptr[rd + 1] = ldq_nucleus(addr + 8);
2023 if (asi == 0x2c) {
2024 bswap64s(&env->regwptr[rd]);
2025 bswap64s(&env->regwptr[rd + 1]);
2028 break;
2029 #endif
2030 default:
2031 helper_check_align(addr, 0x3);
2032 if (rd == 0) {
2033 env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
2034 } else if (rd < 8) {
2035 env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
2036 env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
2037 } else {
2038 env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
2039 env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
2041 break;
2045 void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
2047 unsigned int i;
2048 target_ulong val;
2050 helper_check_align(addr, 3);
2051 addr = asi_address_mask(env, asi, addr);
2053 switch (asi) {
2054 case 0xf0: /* UA2007/JPS1 Block load primary */
2055 case 0xf1: /* UA2007/JPS1 Block load secondary */
2056 case 0xf8: /* UA2007/JPS1 Block load primary LE */
2057 case 0xf9: /* UA2007/JPS1 Block load secondary LE */
2058 if (rd & 7) {
2059 helper_raise_exception(env, TT_ILL_INSN);
2060 return;
2062 helper_check_align(addr, 0x3f);
2063 for (i = 0; i < 8; i++, rd += 2, addr += 8) {
2064 env->fpr[rd/2].ll = helper_ld_asi(addr, asi & 0x8f, 8, 0);
2066 return;
2068 case 0x16: /* UA2007 Block load primary, user privilege */
2069 case 0x17: /* UA2007 Block load secondary, user privilege */
2070 case 0x1e: /* UA2007 Block load primary LE, user privilege */
2071 case 0x1f: /* UA2007 Block load secondary LE, user privilege */
2072 case 0x70: /* JPS1 Block load primary, user privilege */
2073 case 0x71: /* JPS1 Block load secondary, user privilege */
2074 case 0x78: /* JPS1 Block load primary LE, user privilege */
2075 case 0x79: /* JPS1 Block load secondary LE, user privilege */
2076 if (rd & 7) {
2077 helper_raise_exception(env, TT_ILL_INSN);
2078 return;
2080 helper_check_align(addr, 0x3f);
        for (i = 0; i < 8; i++, rd += 2, addr += 8) {
2082 env->fpr[rd/2].ll = helper_ld_asi(addr, asi & 0x19, 8, 0);
2084 return;
2086 default:
2087 break;
2090 switch (size) {
2091 default:
2092 case 4:
2093 val = helper_ld_asi(addr, asi, size, 0);
2094 if (rd & 1) {
2095 env->fpr[rd/2].l.lower = val;
2096 } else {
2097 env->fpr[rd/2].l.upper = val;
2099 break;
2100 case 8:
2101 env->fpr[rd/2].ll = helper_ld_asi(addr, asi, size, 0);
2102 break;
2103 case 16:
2104 env->fpr[rd/2].ll = helper_ld_asi(addr, asi, 8, 0);
2105 env->fpr[rd/2 + 1].ll = helper_ld_asi(addr + 8, asi, 8, 0);
2106 break;
2110 void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
2112 unsigned int i;
2113 target_ulong val;
2115 helper_check_align(addr, 3);
2116 addr = asi_address_mask(env, asi, addr);
2118 switch (asi) {
2119 case 0xe0: /* UA2007/JPS1 Block commit store primary (cache flush) */
2120 case 0xe1: /* UA2007/JPS1 Block commit store secondary (cache flush) */
2121 case 0xf0: /* UA2007/JPS1 Block store primary */
2122 case 0xf1: /* UA2007/JPS1 Block store secondary */
2123 case 0xf8: /* UA2007/JPS1 Block store primary LE */
2124 case 0xf9: /* UA2007/JPS1 Block store secondary LE */
2125 if (rd & 7) {
2126 helper_raise_exception(env, TT_ILL_INSN);
2127 return;
2129 helper_check_align(addr, 0x3f);
2130 for (i = 0; i < 8; i++, rd += 2, addr += 8) {
2131 helper_st_asi(addr, env->fpr[rd/2].ll, asi & 0x8f, 8);
2134 return;
    case 0x16: /* UA2007 Block store primary, user privilege */
    case 0x17: /* UA2007 Block store secondary, user privilege */
    case 0x1e: /* UA2007 Block store primary LE, user privilege */
    case 0x1f: /* UA2007 Block store secondary LE, user privilege */
    case 0x70: /* JPS1 Block store primary, user privilege */
    case 0x71: /* JPS1 Block store secondary, user privilege */
    case 0x78: /* JPS1 Block store primary LE, user privilege */
    case 0x79: /* JPS1 Block store secondary LE, user privilege */
2143 if (rd & 7) {
2144 helper_raise_exception(env, TT_ILL_INSN);
2145 return;
2147 helper_check_align(addr, 0x3f);
2148 for (i = 0; i < 8; i++, rd += 2, addr += 8) {
2149 helper_st_asi(addr, env->fpr[rd/2].ll, asi & 0x19, 8);
2152 return;
2153 default:
2154 break;
2157 switch (size) {
2158 default:
2159 case 4:
2160 if (rd & 1) {
2161 val = env->fpr[rd/2].l.lower;
2162 } else {
2163 val = env->fpr[rd/2].l.upper;
2165 helper_st_asi(addr, val, asi, size);
2166 break;
2167 case 8:
2168 helper_st_asi(addr, env->fpr[rd/2].ll, asi, size);
2169 break;
2170 case 16:
2171 helper_st_asi(addr, env->fpr[rd/2].ll, asi, 8);
2172 helper_st_asi(addr + 8, env->fpr[rd/2 + 1].ll, asi, 8);
2173 break;
    }
}

target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
                            target_ulong val2, uint32_t asi)
{
    target_ulong ret;

    val2 &= 0xffffffffUL;
    ret = helper_ld_asi(addr, asi, 4, 0);
    ret &= 0xffffffffUL;
    if (val2 == ret) {
        helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4);
    }
    return ret;
}

target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
                             target_ulong val2, uint32_t asi)
{
    target_ulong ret;

    ret = helper_ld_asi(addr, asi, 8, 0);
    if (val2 == ret) {
        helper_st_asi(addr, val1, asi, 8);
    }
    return ret;
}
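
/* Illustrative note (not part of the original source): both helpers give the
 * V9 casa/casxa instructions their compare-and-swap semantics - the old
 * memory value is always returned, and val1 is stored only when the expected
 * value val2 matches what was read, so a guest can build the usual retry
 * loop on top of them:
 *
 *     do {
 *         old = *p;                          // ld / ldx
 *         new = old + 1;
 *     } while (cas(p, old, new) != old);     // casa/casxa -> these helpers
 */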
2202 #endif /* TARGET_SPARC64 */
2204 void helper_ldqf(target_ulong addr, int mem_idx)
2206 /* XXX add 128 bit load */
2207 CPU_QuadU u;
2209 helper_check_align(addr, 7);
2210 #if !defined(CONFIG_USER_ONLY)
2211 switch (mem_idx) {
2212 case MMU_USER_IDX:
2213 u.ll.upper = ldq_user(addr);
2214 u.ll.lower = ldq_user(addr + 8);
2215 QT0 = u.q;
2216 break;
2217 case MMU_KERNEL_IDX:
2218 u.ll.upper = ldq_kernel(addr);
2219 u.ll.lower = ldq_kernel(addr + 8);
2220 QT0 = u.q;
2221 break;
2222 #ifdef TARGET_SPARC64
2223 case MMU_HYPV_IDX:
2224 u.ll.upper = ldq_hypv(addr);
2225 u.ll.lower = ldq_hypv(addr + 8);
2226 QT0 = u.q;
2227 break;
2228 #endif
2229 default:
2230 DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx);
2231 break;
2233 #else
2234 u.ll.upper = ldq_raw(address_mask(env, addr));
2235 u.ll.lower = ldq_raw(address_mask(env, addr + 8));
2236 QT0 = u.q;
2237 #endif
2240 void helper_stqf(target_ulong addr, int mem_idx)
2242 /* XXX add 128 bit store */
2243 CPU_QuadU u;
2245 helper_check_align(addr, 7);
2246 #if !defined(CONFIG_USER_ONLY)
2247 switch (mem_idx) {
2248 case MMU_USER_IDX:
2249 u.q = QT0;
2250 stq_user(addr, u.ll.upper);
2251 stq_user(addr + 8, u.ll.lower);
2252 break;
2253 case MMU_KERNEL_IDX:
2254 u.q = QT0;
2255 stq_kernel(addr, u.ll.upper);
2256 stq_kernel(addr + 8, u.ll.lower);
2257 break;
2258 #ifdef TARGET_SPARC64
2259 case MMU_HYPV_IDX:
2260 u.q = QT0;
2261 stq_hypv(addr, u.ll.upper);
2262 stq_hypv(addr + 8, u.ll.lower);
2263 break;
2264 #endif
2265 default:
2266 DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", mem_idx);
2267 break;
2269 #else
2270 u.q = QT0;
2271 stq_raw(address_mask(env, addr), u.ll.upper);
2272 stq_raw(address_mask(env, addr + 8), u.ll.lower);
2273 #endif
2276 #ifndef TARGET_SPARC64
2277 #if !defined(CONFIG_USER_ONLY)
2278 static void do_unassigned_access(target_phys_addr_t addr, int is_write,
2279 int is_exec, int is_asi, int size)
2281 int fault_type;
2283 #ifdef DEBUG_UNASSIGNED
2284 if (is_asi) {
2285 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
2286 " asi 0x%02x from " TARGET_FMT_lx "\n",
2287 is_exec ? "exec" : is_write ? "write" : "read", size,
2288 size == 1 ? "" : "s", addr, is_asi, env->pc);
2289 } else {
2290 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
2291 " from " TARGET_FMT_lx "\n",
2292 is_exec ? "exec" : is_write ? "write" : "read", size,
2293 size == 1 ? "" : "s", addr, env->pc);
2295 #endif
2296 /* Don't overwrite translation and access faults */
2297 fault_type = (env->mmuregs[3] & 0x1c) >> 2;
2298 if ((fault_type > 4) || (fault_type == 0)) {
2299 env->mmuregs[3] = 0; /* Fault status register */
2300 if (is_asi) {
2301 env->mmuregs[3] |= 1 << 16;
2303 if (env->psrs) {
2304 env->mmuregs[3] |= 1 << 5;
2306 if (is_exec) {
2307 env->mmuregs[3] |= 1 << 6;
2309 if (is_write) {
2310 env->mmuregs[3] |= 1 << 7;
2312 env->mmuregs[3] |= (5 << 2) | 2;
2313 /* SuperSPARC will never place instruction fault addresses in the FAR */
2314 if (!is_exec) {
2315 env->mmuregs[4] = addr; /* Fault address register */
2318 /* overflow (same type fault was not read before another fault) */
2319 if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) {
2320 env->mmuregs[3] |= 1;
2323 if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
2324 if (is_exec) {
2325 helper_raise_exception(env, TT_CODE_ACCESS);
2326 } else {
2327 helper_raise_exception(env, TT_DATA_ACCESS);
2331 /* flush neverland mappings created during no-fault mode,
2332 so the sequential MMU faults report proper fault types */
2333 if (env->mmuregs[0] & MMU_NF) {
2334 tlb_flush(env, 1);
2337 #endif
2338 #else
2339 #if defined(CONFIG_USER_ONLY)
2340 static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
2341 int is_asi, int size)
2342 #else
2343 static void do_unassigned_access(target_phys_addr_t addr, int is_write,
2344 int is_exec, int is_asi, int size)
2345 #endif
2347 #ifdef DEBUG_UNASSIGNED
2348 printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
2349 "\n", addr, env->pc);
2350 #endif
2352 if (is_exec) {
2353 helper_raise_exception(env, TT_CODE_ACCESS);
2354 } else {
2355 helper_raise_exception(env, TT_DATA_ACCESS);
2358 #endif
2360 #if !defined(CONFIG_USER_ONLY)
2361 void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr,
2362 int is_write, int is_exec, int is_asi, int size)
2364 CPUState *saved_env;
2366 saved_env = env;
2367 env = env1;
2368 do_unassigned_access(addr, is_write, is_exec, is_asi, size);
2369 env = saved_env;
2371 #endif