#include "exec.h"
#include "host-utils.h"
#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

//#define DEBUG_UNALIGNED
//#define DEBUG_UNASSIGNED

#ifdef DEBUG_MMU
#define DPRINTF_MMU(fmt, args...) \
    do { printf("MMU: " fmt , ##args); } while (0)
#else
#define DPRINTF_MMU(fmt, args...) do {} while (0)
#endif

#ifdef DEBUG_MXCC
#define DPRINTF_MXCC(fmt, args...) \
    do { printf("MXCC: " fmt , ##args); } while (0)
#else
#define DPRINTF_MXCC(fmt, args...) do {} while (0)
#endif

#ifdef DEBUG_ASI
#define DPRINTF_ASI(fmt, args...) \
    do { printf("ASI: " fmt , ##args); } while (0)
#else
#define DPRINTF_ASI(fmt, args...) do {} while (0)
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
#else
#define AM_CHECK(env1) (1)
#endif
#endif
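
/* When AM_CHECK() reports that address masking (PSTATE.AM) is in effect,
   address_mask() below truncates 64-bit virtual addresses to 32 bits. */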
static inline void address_mask(CPUState *env1, target_ulong *addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(env1))
        *addr &= 0xffffffffULL;
#endif
}
void raise_exception(int tt)
{
    env->exception_index = tt;
    cpu_loop_exit();
}

void helper_trap(target_ulong nb_trap)
{
    env->exception_index = TT_TRAP + (nb_trap & 0x7f);
    cpu_loop_exit();
}

void helper_trapcc(target_ulong nb_trap, target_ulong do_trap)
{
    if (do_trap) {
        env->exception_index = TT_TRAP + (nb_trap & 0x7f);
        cpu_loop_exit();
    }
}

static inline void set_cwp(int new_cwp)
{
    cpu_set_cwp(env, new_cwp);
}

void helper_check_align(target_ulong addr, uint32_t align)
{
    if (addr & align) {
#ifdef DEBUG_UNALIGNED
        printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
               "\n", addr, env->pc);
#endif
        raise_exception(TT_UNALIGNED);
    }
}
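
/* Note that helper_check_align() treats 'align' as a bit mask rather than an
   alignment in bytes; callers below pass size - 1 (or 0x3f for 64-byte block
   accesses). */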
#define F_HELPER(name, p) void helper_f##name##p(void)

#define F_BINOP(name)                                           \
    float32 helper_f ## name ## s (float32 src1, float32 src2) \
    {                                                           \
        return float32_ ## name (src1, src2, &env->fp_status); \
    }                                                           \
    F_HELPER(name, d)                                           \
    {                                                           \
        DT0 = float64_ ## name (DT0, DT1, &env->fp_status);    \
    }                                                           \
    F_HELPER(name, q)                                           \
    {                                                           \
        QT0 = float128_ ## name (QT0, QT1, &env->fp_status);   \
    }
void helper_fsmuld(void)
{
    DT0 = float64_mul(float32_to_float64(FT0, &env->fp_status),
                      float32_to_float64(FT1, &env->fp_status),
                      &env->fp_status);
}

void helper_fdmulq(void)
{
    QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
                       float64_to_float128(DT1, &env->fp_status),
                       &env->fp_status);
}

float32 helper_fnegs(float32 src)
{
    return float32_chs(src);
}

#ifdef TARGET_SPARC64
void helper_fnegd(void)
{
    DT0 = float64_chs(DT1);
}

void helper_fnegq(void)
{
    QT0 = float128_chs(QT1);
}
#endif
/* Integer to float conversion.  */
float32 helper_fitos(int32_t src)
{
    return int32_to_float32(src, &env->fp_status);
}

void helper_fitod(void)
{
    DT0 = int32_to_float64(*((int32_t *)&FT1), &env->fp_status);
}

void helper_fitoq(void)
{
    QT0 = int32_to_float128(*((int32_t *)&FT1), &env->fp_status);
}

#ifdef TARGET_SPARC64
void helper_fxtos(void)
{
    FT0 = int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
}

void helper_fxtod(void)
{
    DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
}

void helper_fxtoq(void)
{
    QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
}
#endif
/* floating point conversion */
void helper_fdtos(void)
{
    FT0 = float64_to_float32(DT1, &env->fp_status);
}

void helper_fstod(void)
{
    DT0 = float32_to_float64(FT1, &env->fp_status);
}

void helper_fqtos(void)
{
    FT0 = float128_to_float32(QT1, &env->fp_status);
}

void helper_fstoq(void)
{
    QT0 = float32_to_float128(FT1, &env->fp_status);
}

void helper_fqtod(void)
{
    DT0 = float128_to_float64(QT1, &env->fp_status);
}

void helper_fdtoq(void)
{
    QT0 = float64_to_float128(DT1, &env->fp_status);
}

/* Float to integer conversion.  */
int32_t helper_fstoi(float32 src)
{
    return float32_to_int32_round_to_zero(src, &env->fp_status);
}

void helper_fdtoi(void)
{
    *((int32_t *)&FT0) = float64_to_int32_round_to_zero(DT1, &env->fp_status);
}

void helper_fqtoi(void)
{
    *((int32_t *)&FT0) = float128_to_int32_round_to_zero(QT1, &env->fp_status);
}
#ifdef TARGET_SPARC64
void helper_fstox(void)
{
    *((int64_t *)&DT0) = float32_to_int64_round_to_zero(FT1, &env->fp_status);
}

void helper_fdtox(void)
{
    *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
}

void helper_fqtox(void)
{
    *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
}

void helper_faligndata(void)
{
    uint64_t tmp;

    tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
    /* on many architectures a shift of 64 does nothing */
    if ((env->gsr & 7) != 0) {
        tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
    }
    *((uint64_t *)&DT0) = tmp;
}
#ifdef WORDS_BIGENDIAN
#define VIS_B64(n) b[7 - (n)]
#define VIS_W64(n) w[3 - (n)]
#define VIS_SW64(n) sw[3 - (n)]
#define VIS_L64(n) l[1 - (n)]
#define VIS_B32(n) b[3 - (n)]
#define VIS_W32(n) w[1 - (n)]
#else
#define VIS_B64(n) b[n]
#define VIS_W64(n) w[n]
#define VIS_SW64(n) sw[n]
#define VIS_L64(n) l[n]
#define VIS_B32(n) b[n]
#define VIS_W32(n) w[n]
#endif
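
/* The VIS_* accessors index the byte/word/long members of the vis32/vis64
   helper unions so that index 0 always names the least significant element,
   independent of host byte order. */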
void helper_fpmerge(void)
{
    vis64 s, d;

    s.d = DT0;
    d.d = DT1;

    // Reverse calculation order to handle overlap
    d.VIS_B64(7) = s.VIS_B64(3);
    d.VIS_B64(6) = d.VIS_B64(3);
    d.VIS_B64(5) = s.VIS_B64(2);
    d.VIS_B64(4) = d.VIS_B64(2);
    d.VIS_B64(3) = s.VIS_B64(1);
    d.VIS_B64(2) = d.VIS_B64(1);
    d.VIS_B64(1) = s.VIS_B64(0);
    //d.VIS_B64(0) = d.VIS_B64(0);

    DT0 = d.d;
}
void helper_fmul8x16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                 \
    tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r);      \
    if ((tmp & 0xff) > 0x7f)                                    \
        tmp += 0x100;                                           \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}

void helper_fmul8x16al(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                 \
    tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r);      \
    if ((tmp & 0xff) > 0x7f)                                    \
        tmp += 0x100;                                           \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}

void helper_fmul8x16au(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                 \
    tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r);      \
    if ((tmp & 0xff) > 0x7f)                                    \
        tmp += 0x100;                                           \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}

void helper_fmul8sux16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                         \
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);      \
    if ((tmp & 0xff) > 0x7f)                                            \
        tmp += 0x100;                                                   \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}

void helper_fmul8ulx16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                         \
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));       \
    if ((tmp & 0xff) > 0x7f)                                            \
        tmp += 0x100;                                                   \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}

void helper_fmuld8sux16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                         \
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);      \
    if ((tmp & 0xff) > 0x7f)                                            \
        tmp += 0x100;                                                   \
    d.VIS_L64(r) = tmp;

    // Reverse calculation order to handle overlap
    PMUL(1);
    PMUL(0);
#undef PMUL

    DT0 = d.d;
}

void helper_fmuld8ulx16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                         \
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));       \
    if ((tmp & 0xff) > 0x7f)                                            \
        tmp += 0x100;                                                   \
    d.VIS_L64(r) = tmp;

    // Reverse calculation order to handle overlap
    PMUL(1);
    PMUL(0);
#undef PMUL

    DT0 = d.d;
}
void helper_fexpand(void)
{
    vis32 s;
    vis64 d;

    s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
    d.VIS_L64(0) = s.VIS_W32(0) << 4;
    d.VIS_L64(1) = s.VIS_W32(1) << 4;
    d.VIS_L64(2) = s.VIS_W32(2) << 4;
    d.VIS_L64(3) = s.VIS_W32(3) << 4;

    DT0 = d.d;
}
#define VIS_HELPER(name, F)                             \
    void name##16(void)                                 \
    {                                                   \
        vis64 s, d;                                     \
                                                        \
        s.d = DT0;                                      \
        d.d = DT1;                                      \
                                                        \
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0));   \
        d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1));   \
        d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2));   \
        d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3));   \
                                                        \
        DT0 = d.d;                                      \
    }                                                   \
                                                        \
    uint32_t name##16s(uint32_t src1, uint32_t src2)    \
    {                                                   \
        vis32 s, d;                                     \
                                                        \
        s.l = src1;                                     \
        d.l = src2;                                     \
                                                        \
        d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0));   \
        d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1));   \
                                                        \
        return d.l;                                     \
    }                                                   \
                                                        \
    void name##32(void)                                 \
    {                                                   \
        vis64 s, d;                                     \
                                                        \
        s.d = DT0;                                      \
        d.d = DT1;                                      \
                                                        \
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0));   \
        d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1));   \
                                                        \
        DT0 = d.d;                                      \
    }                                                   \
                                                        \
    uint32_t name##32s(uint32_t src1, uint32_t src2)    \
    {                                                   \
        vis32 s, d;                                     \
                                                        \
        s.l = src1;                                     \
        d.l = src2;                                     \
                                                        \
        d.l = F(d.l, s.l);                              \
                                                        \
        return d.l;                                     \
    }

#define FADD(a, b) ((a) + (b))
#define FSUB(a, b) ((a) - (b))
VIS_HELPER(helper_fpadd, FADD)
VIS_HELPER(helper_fpsub, FSUB)
#define VIS_CMPHELPER(name, F)                                    \
    void name##16(void)                                           \
    {                                                             \
        vis64 s, d;                                               \
                                                                  \
        s.d = DT0;                                                \
        d.d = DT1;                                                \
                                                                  \
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0;       \
        d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0;      \
        d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0;      \
        d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0;      \
                                                                  \
        DT0 = d.d;                                                \
    }                                                             \
                                                                  \
    void name##32(void)                                           \
    {                                                             \
        vis64 s, d;                                               \
                                                                  \
        s.d = DT0;                                                \
        d.d = DT1;                                                \
                                                                  \
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0;       \
        d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0;      \
                                                                  \
        DT0 = d.d;                                                \
    }

#define FCMPGT(a, b) ((a) > (b))
#define FCMPEQ(a, b) ((a) == (b))
#define FCMPLE(a, b) ((a) <= (b))
#define FCMPNE(a, b) ((a) != (b))

VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
VIS_CMPHELPER(helper_fcmple, FCMPLE)
VIS_CMPHELPER(helper_fcmpne, FCMPNE)
void helper_check_ieee_exceptions(void)
{
    target_ulong status;

    status = get_float_exception_flags(&env->fp_status);
    if (status) {
        /* Copy IEEE 754 flags into FSR */
        if (status & float_flag_invalid)
            env->fsr |= FSR_NVC;
        if (status & float_flag_overflow)
            env->fsr |= FSR_OFC;
        if (status & float_flag_underflow)
            env->fsr |= FSR_UFC;
        if (status & float_flag_divbyzero)
            env->fsr |= FSR_DZC;
        if (status & float_flag_inexact)
            env->fsr |= FSR_NXC;

        if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
            /* Unmasked exception, generate a trap */
            env->fsr |= FSR_FTT_IEEE_EXCP;
            raise_exception(TT_FP_EXCP);
        } else {
            /* Accumulate exceptions */
            env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
        }
    }
}

void helper_clear_float_exceptions(void)
{
    set_float_exception_flags(0, &env->fp_status);
}
float32 helper_fabss(float32 src)
{
    return float32_abs(src);
}

#ifdef TARGET_SPARC64
void helper_fabsd(void)
{
    DT0 = float64_abs(DT1);
}

void helper_fabsq(void)
{
    QT0 = float128_abs(QT1);
}
#endif

float32 helper_fsqrts(float32 src)
{
    return float32_sqrt(src, &env->fp_status);
}

void helper_fsqrtd(void)
{
    DT0 = float64_sqrt(DT1, &env->fp_status);
}

void helper_fsqrtq(void)
{
    QT0 = float128_sqrt(QT1, &env->fp_status);
}
#define GEN_FCMP(name, size, reg1, reg2, FS, TRAP)                      \
    void glue(helper_, name) (void)                                     \
    {                                                                   \
        target_ulong new_fsr;                                           \
                                                                        \
        env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                     \
        switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) {   \
        case float_relation_unordered:                                  \
            new_fsr = (FSR_FCC1 | FSR_FCC0) << FS;                      \
            if ((env->fsr & FSR_NVM) || TRAP) {                         \
                env->fsr |= new_fsr;                                    \
                env->fsr |= FSR_NVC;                                    \
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
                raise_exception(TT_FP_EXCP);                            \
            } else {                                                    \
                env->fsr |= FSR_NVA;                                    \
            }                                                           \
            break;                                                      \
        case float_relation_less:                                       \
            new_fsr = FSR_FCC0 << FS;                                   \
            break;                                                      \
        case float_relation_greater:                                    \
            new_fsr = FSR_FCC1 << FS;                                   \
            break;                                                      \
        default:                                                        \
            new_fsr = 0;                                                \
            break;                                                      \
        }                                                               \
        env->fsr |= new_fsr;                                            \
    }

#define GEN_FCMPS(name, size, FS, TRAP)                                 \
    void glue(helper_, name)(float32 src1, float32 src2)                \
    {                                                                   \
        target_ulong new_fsr;                                           \
                                                                        \
        env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                     \
        switch (glue(size, _compare) (src1, src2, &env->fp_status)) {   \
        case float_relation_unordered:                                  \
            new_fsr = (FSR_FCC1 | FSR_FCC0) << FS;                      \
            if ((env->fsr & FSR_NVM) || TRAP) {                         \
                env->fsr |= new_fsr;                                    \
                env->fsr |= FSR_NVC;                                    \
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
                raise_exception(TT_FP_EXCP);                            \
            } else {                                                    \
                env->fsr |= FSR_NVA;                                    \
            }                                                           \
            break;                                                      \
        case float_relation_less:                                       \
            new_fsr = FSR_FCC0 << FS;                                   \
            break;                                                      \
        case float_relation_greater:                                    \
            new_fsr = FSR_FCC1 << FS;                                   \
            break;                                                      \
        default:                                                        \
            new_fsr = 0;                                                \
            break;                                                      \
        }                                                               \
        env->fsr |= new_fsr;                                            \
    }
GEN_FCMPS(fcmps, float32, 0, 0);
GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);

GEN_FCMPS(fcmpes, float32, 0, 1);
GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);

GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
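
/* FS selects which fcc field of the FSR receives the comparison result:
   0 targets fcc0 (FSR bits 10-11), while the V9-only instantiations below
   use 22, 24 and 26 to reach fcc1-fcc3 in the upper half of the FSR. */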
#ifdef TARGET_SPARC64
GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);

GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);

GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);

GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);

GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);

GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
#endif
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
    defined(DEBUG_MXCC)
static void dump_mxcc(CPUState *env)
{
    printf("mxccdata: %016llx %016llx %016llx %016llx\n",
           env->mxccdata[0], env->mxccdata[1],
           env->mxccdata[2], env->mxccdata[3]);
    printf("mxccregs: %016llx %016llx %016llx %016llx\n"
           "          %016llx %016llx %016llx %016llx\n",
           env->mxccregs[0], env->mxccregs[1],
           env->mxccregs[2], env->mxccregs[3],
           env->mxccregs[4], env->mxccregs[5],
           env->mxccregs[6], env->mxccregs[7]);
}
#endif
#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
    && defined(DEBUG_ASI)
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
                     uint64_t r1)
{
    switch (size) {
    case 1:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xff);
        break;
    case 2:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffff);
        break;
    case 4:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffffffff);
        break;
    case 8:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
                    addr, asi, r1);
        break;
    }
}
#endif
#ifndef TARGET_SPARC64
#ifndef CONFIG_USER_ONLY
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
    uint32_t last_addr = addr;
#endif

    helper_check_align(addr, size - 1);
    switch (asi) {
    case 2: /* SuperSparc MXCC registers */
        switch (addr) {
        case 0x01c00a00: /* MXCC control register */
            if (size == 8)
                ret = env->mxccregs[3];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00a04: /* MXCC control register */
            if (size == 4)
                ret = env->mxccregs[3];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00c00: /* Module reset register */
            if (size == 8) {
                ret = env->mxccregs[5];
                // should we do something here?
            } else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00f00: /* MBus port address register */
            if (size == 8)
                ret = env->mxccregs[7];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        default:
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
                         size);
            break;
        }
        DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
                     "addr = %08x -> ret = %08x,"
                     "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
        break;
    case 3: /* MMU probe */
        {
            int mmulev;

            mmulev = (addr >> 8) & 15;
            if (mmulev > 4)
                ret = 0;
            else
                ret = mmu_probe(env, addr, mmulev);
            DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
                        addr, mmulev, ret);
        }
        break;
    case 4: /* read MMU regs */
        {
            int reg = (addr >> 8) & 0x1f;

            ret = env->mmuregs[reg];
            if (reg == 3) /* Fault status cleared on read */
                env->mmuregs[3] = 0;
            else if (reg == 0x13) /* Fault status read */
                ret = env->mmuregs[3];
            else if (reg == 0x14) /* Fault address read */
                ret = env->mmuregs[4];
            DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
        }
        break;
    case 5: // Turbosparc ITLB Diagnostic
    case 6: // Turbosparc DTLB Diagnostic
    case 7: // Turbosparc IOTLB Diagnostic
        break;
    case 9: /* Supervisor code access */
        switch(size) {
        case 1:
            ret = ldub_code(addr);
            break;
        case 2:
            ret = lduw_code(addr);
            break;
        default:
        case 4:
            ret = ldl_code(addr);
            break;
        case 8:
            ret = ldq_code(addr);
            break;
        }
        break;
    case 0xa: /* User data access */
        switch(size) {
        case 1:
            ret = ldub_user(addr);
            break;
        case 2:
            ret = lduw_user(addr);
            break;
        default:
        case 4:
            ret = ldl_user(addr);
            break;
        case 8:
            ret = ldq_user(addr);
            break;
        }
        break;
    case 0xb: /* Supervisor data access */
        switch(size) {
        case 1:
            ret = ldub_kernel(addr);
            break;
        case 2:
            ret = lduw_kernel(addr);
            break;
        default:
        case 4:
            ret = ldl_kernel(addr);
            break;
        case 8:
            ret = ldq_kernel(addr);
            break;
        }
        break;
    case 0xc: /* I-cache tag */
    case 0xd: /* I-cache data */
    case 0xe: /* D-cache tag */
    case 0xf: /* D-cache data */
        break;
    case 0x20: /* MMU passthrough */
        switch(size) {
        case 1:
            ret = ldub_phys(addr);
            break;
        case 2:
            ret = lduw_phys(addr);
            break;
        default:
        case 4:
            ret = ldl_phys(addr);
            break;
        case 8:
            ret = ldq_phys(addr);
            break;
        }
        break;
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
        switch(size) {
        case 1:
            ret = ldub_phys((target_phys_addr_t)addr
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        case 2:
            ret = lduw_phys((target_phys_addr_t)addr
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        default:
        case 4:
            ret = ldl_phys((target_phys_addr_t)addr
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        case 8:
            ret = ldq_phys((target_phys_addr_t)addr
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        }
        break;
    case 0x30: // Turbosparc secondary cache diagnostic
    case 0x31: // Turbosparc RAM snoop
    case 0x32: // Turbosparc page table descriptor diagnostic
    case 0x39: /* data cache diagnostic register */
        ret = 0;
        break;
    case 8: /* User code access, XXX */
    default:
        do_unassigned_access(addr, 0, 0, asi);
        ret = 0;
        break;
    }
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
{
    helper_check_align(addr, size - 1);
    switch(asi) {
    case 2: /* SuperSparc MXCC registers */
        switch (addr) {
        case 0x01c00000: /* MXCC stream data register 0 */
            if (size == 8)
                env->mxccdata[0] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00008: /* MXCC stream data register 1 */
            if (size == 8)
                env->mxccdata[1] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00010: /* MXCC stream data register 2 */
            if (size == 8)
                env->mxccdata[2] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00018: /* MXCC stream data register 3 */
            if (size == 8)
                env->mxccdata[3] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00100: /* MXCC stream source */
            if (size == 8)
                env->mxccregs[0] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        0);
            env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        8);
            env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        16);
            env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
                                        24);
            break;
        case 0x01c00200: /* MXCC stream destination */
            if (size == 8)
                env->mxccregs[1] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  0,
                     env->mxccdata[0]);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) +  8,
                     env->mxccdata[1]);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
                     env->mxccdata[2]);
            stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
                     env->mxccdata[3]);
            break;
        case 0x01c00a00: /* MXCC control register */
            if (size == 8)
                env->mxccregs[3] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00a04: /* MXCC control register */
            if (size == 4)
                env->mxccregs[3] = (env->mxccregs[0xa] & 0xffffffff00000000ULL)
                    | val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00e00: /* MXCC error register */
            // writing a 1 bit clears the error
            if (size == 8)
                env->mxccregs[6] &= ~val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00f00: /* MBus port address register */
            if (size == 8)
                env->mxccregs[7] = val;
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        default:
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
                         size);
            break;
        }
        DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %08x\n", asi,
                     size, addr, val);
#ifdef DEBUG_MXCC
        dump_mxcc(env);
#endif
        break;
    case 3: /* MMU flush */
        {
            int mmulev;

            mmulev = (addr >> 8) & 15;
            DPRINTF_MMU("mmu flush level %d\n", mmulev);
            switch (mmulev) {
            case 0: // flush page
                tlb_flush_page(env, addr & 0xfffff000);
                break;
            case 1: // flush segment (256k)
            case 2: // flush region (16M)
            case 3: // flush context (4G)
            case 4: // flush entire
                tlb_flush(env, 1);
                break;
            default:
                break;
            }
        }
        break;
    case 4: /* write MMU regs */
        {
            int reg = (addr >> 8) & 0x1f;
            uint32_t oldreg;

            oldreg = env->mmuregs[reg];
            switch(reg) {
            case 0: // Control Register
                env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
                                    (val & 0x00ffffff);
                // Mappings generated during no-fault mode or MMU
                // disabled mode are invalid in normal mode
                if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
                    (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
                    tlb_flush(env, 1);
                break;
            case 1: // Context Table Pointer Register
                env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
                break;
            case 2: // Context Register
                env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
                if (oldreg != env->mmuregs[reg]) {
                    /* we flush when the MMU context changes because
                       QEMU has no MMU context support */
                    tlb_flush(env, 1);
                }
                break;
            case 3: // Synchronous Fault Status Register with Clear
            case 4: // Synchronous Fault Address Register
                break;
            case 0x10: // TLB Replacement Control Register
                env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
                break;
            case 0x13: // Synchronous Fault Status Register with Read and Clear
                env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
                break;
            case 0x14: // Synchronous Fault Address Register
                env->mmuregs[4] = val;
                break;
            default:
                env->mmuregs[reg] = val;
                break;
            }
            if (oldreg != env->mmuregs[reg]) {
                DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
                            reg, oldreg, env->mmuregs[reg]);
            }
        }
        break;
    case 5: // Turbosparc ITLB Diagnostic
    case 6: // Turbosparc DTLB Diagnostic
    case 7: // Turbosparc IOTLB Diagnostic
        break;
    case 0xa: /* User data access */
        switch(size) {
        case 1:
            stb_user(addr, val);
            break;
        case 2:
            stw_user(addr, val);
            break;
        default:
        case 4:
            stl_user(addr, val);
            break;
        case 8:
            stq_user(addr, val);
            break;
        }
        break;
    case 0xb: /* Supervisor data access */
        switch(size) {
        case 1:
            stb_kernel(addr, val);
            break;
        case 2:
            stw_kernel(addr, val);
            break;
        default:
        case 4:
            stl_kernel(addr, val);
            break;
        case 8:
            stq_kernel(addr, val);
            break;
        }
        break;
    case 0xc: /* I-cache tag */
    case 0xd: /* I-cache data */
    case 0xe: /* D-cache tag */
    case 0xf: /* D-cache data */
    case 0x10: /* I/D-cache flush page */
    case 0x11: /* I/D-cache flush segment */
    case 0x12: /* I/D-cache flush region */
    case 0x13: /* I/D-cache flush context */
    case 0x14: /* I/D-cache flush user */
        break;
    case 0x17: /* Block copy, sta access */
        {
            // copy 32 bytes from val (src) to addr (dst)
            unsigned int i;
            uint32_t src = val & ~3, dst = addr & ~3, temp;

            for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
                temp = ldl_kernel(src);
                stl_kernel(dst, temp);
            }
        }
        break;
    case 0x1f: /* Block fill, stda access */
        {
            // fill 32 bytes with val
            unsigned int i;
            uint32_t dst = addr & 7;

            for (i = 0; i < 32; i += 8, dst += 8)
                stq_kernel(dst, val);
        }
        break;
    case 0x20: /* MMU passthrough */
        switch(size) {
        case 1:
            stb_phys(addr, val);
            break;
        case 2:
            stw_phys(addr, val);
            break;
        default:
        case 4:
            stl_phys(addr, val);
            break;
        case 8:
            stq_phys(addr, val);
            break;
        }
        break;
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
        switch(size) {
        case 1:
            stb_phys((target_phys_addr_t)addr
                     | ((target_phys_addr_t)(asi & 0xf) << 32), val);
            break;
        case 2:
            stw_phys((target_phys_addr_t)addr
                     | ((target_phys_addr_t)(asi & 0xf) << 32), val);
            break;
        default:
        case 4:
            stl_phys((target_phys_addr_t)addr
                     | ((target_phys_addr_t)(asi & 0xf) << 32), val);
            break;
        case 8:
            stq_phys((target_phys_addr_t)addr
                     | ((target_phys_addr_t)(asi & 0xf) << 32), val);
            break;
        }
        break;
    case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
    case 0x31: // store buffer data, Ross RT620 I-cache flush or
               // Turbosparc snoop RAM
    case 0x32: // store buffer control or Turbosparc page table
               // descriptor diagnostic
    case 0x36: /* I-cache flash clear */
    case 0x37: /* D-cache flash clear */
    case 0x38: /* breakpoint diagnostics */
    case 0x4c: /* breakpoint action */
        break;
    case 8: /* User code access, XXX */
    case 9: /* Supervisor code access, XXX */
    default:
        do_unassigned_access(addr, 1, 0, asi);
        break;
    }
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
}

#endif /* CONFIG_USER_ONLY */
#else /* TARGET_SPARC64 */
#ifdef CONFIG_USER_ONLY
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
#endif

    if (asi < 0x80)
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    address_mask(env, &addr);

    switch (asi) {
    case 0x82: // Primary no-fault
    case 0x8a: // Primary no-fault LE
        if (page_check_range(addr, size, PAGE_READ) == -1) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        // Fall through
    case 0x80: // Primary
    case 0x88: // Primary LE
        switch(size) {
        case 1:
            ret = ldub_raw(addr);
            break;
        case 2:
            ret = lduw_raw(addr);
            break;
        case 4:
            ret = ldl_raw(addr);
            break;
        default:
        case 8:
            ret = ldq_raw(addr);
            break;
        }
        break;
    case 0x83: // Secondary no-fault
    case 0x8b: // Secondary no-fault LE
        if (page_check_range(addr, size, PAGE_READ) == -1) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        // Fall through
    case 0x81: // Secondary
    case 0x89: // Secondary LE
        // XXX
        break;
    default:
        break;
    }

    /* Convert from little endian */
    switch (asi) {
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0x8a: // Primary no-fault LE
    case 0x8b: // Secondary no-fault LE
        switch(size) {
        case 2:
            ret = bswap16(ret);
            break;
        case 4:
            ret = bswap32(ret);
            break;
        case 8:
            ret = bswap64(ret);
            break;
        default:
            break;
        }
    default:
        break;
    }

    /* Convert to signed number */
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
{
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
    if (asi < 0x80)
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    address_mask(env, &addr);

    /* Convert to little endian */
    switch (asi) {
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
        switch(size) {
        case 2:
            addr = bswap16(addr);
            break;
        case 4:
            addr = bswap32(addr);
            break;
        case 8:
            addr = bswap64(addr);
            break;
        default:
            break;
        }
    default:
        break;
    }

    switch(asi) {
    case 0x80: // Primary
    case 0x88: // Primary LE
        switch(size) {
        case 1:
            stb_raw(addr, val);
            break;
        case 2:
            stw_raw(addr, val);
            break;
        case 4:
            stl_raw(addr, val);
            break;
        case 8:
        default:
            stq_raw(addr, val);
            break;
        }
        break;
    case 0x81: // Secondary
    case 0x89: // Secondary LE
        // XXX
        return;
    case 0x82: // Primary no-fault, RO
    case 0x83: // Secondary no-fault, RO
    case 0x8a: // Primary no-fault LE, RO
    case 0x8b: // Secondary no-fault LE, RO
    default:
        do_unassigned_access(addr, 1, 0, 1);
        return;
    }
}

#else /* CONFIG_USER_ONLY */
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
#endif

    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || ((env->def->features & CPU_FEATURE_HYPV)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    switch (asi) {
    case 0x82: // Primary no-fault
    case 0x8a: // Primary no-fault LE
        if (cpu_get_phys_page_debug(env, addr) == -1ULL) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        // Fall through
    case 0x10: // As if user primary
    case 0x18: // As if user primary LE
    case 0x80: // Primary
    case 0x88: // Primary LE
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
            if ((env->def->features & CPU_FEATURE_HYPV)
                && env->hpstate & HS_PRIV) {
                switch(size) {
                case 1:
                    ret = ldub_hypv(addr);
                    break;
                case 2:
                    ret = lduw_hypv(addr);
                    break;
                case 4:
                    ret = ldl_hypv(addr);
                    break;
                default:
                case 8:
                    ret = ldq_hypv(addr);
                    break;
                }
            } else {
                switch(size) {
                case 1:
                    ret = ldub_kernel(addr);
                    break;
                case 2:
                    ret = lduw_kernel(addr);
                    break;
                case 4:
                    ret = ldl_kernel(addr);
                    break;
                default:
                case 8:
                    ret = ldq_kernel(addr);
                    break;
                }
            }
        } else {
            switch(size) {
            case 1:
                ret = ldub_user(addr);
                break;
            case 2:
                ret = lduw_user(addr);
                break;
            case 4:
                ret = ldl_user(addr);
                break;
            default:
            case 8:
                ret = ldq_user(addr);
                break;
            }
        }
        break;
    case 0x14: // Bypass
    case 0x15: // Bypass, non-cacheable
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
        switch(size) {
        case 1:
            ret = ldub_phys(addr);
            break;
        case 2:
            ret = lduw_phys(addr);
            break;
        case 4:
            ret = ldl_phys(addr);
            break;
        default:
        case 8:
            ret = ldq_phys(addr);
            break;
        }
        break;
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        //  Only ldda allowed
        raise_exception(TT_ILL_INSN);
        return 0;
    case 0x83: // Secondary no-fault
    case 0x8b: // Secondary no-fault LE
        if (cpu_get_phys_page_debug(env, addr) == -1ULL) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        // Fall through
    case 0x04: // Nucleus
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x11: // As if user secondary
    case 0x19: // As if user secondary LE
    case 0x4a: // UPA config
    case 0x81: // Secondary
    case 0x89: // Secondary LE
        // XXX
        break;
    case 0x50: // I-MMU regs
        {
            int reg = (addr >> 3) & 0xf;

            ret = env->immuregs[reg];
            break;
        }
    case 0x51: // I-MMU 8k TSB pointer
    case 0x52: // I-MMU 64k TSB pointer
        // XXX
        break;
    case 0x55: // I-MMU data access
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->itlb_tte[reg];
            break;
        }
    case 0x56: // I-MMU tag read
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->itlb_tag[reg];
            break;
        }
    case 0x58: // D-MMU regs
        {
            int reg = (addr >> 3) & 0xf;

            ret = env->dmmuregs[reg];
            break;
        }
    case 0x5d: // D-MMU data access
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->dtlb_tte[reg];
            break;
        }
    case 0x5e: // D-MMU tag read
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->dtlb_tag[reg];
            break;
        }
    case 0x46: // D-cache data
    case 0x47: // D-cache tag access
    case 0x4b: // E-cache error enable
    case 0x4c: // E-cache asynchronous fault status
    case 0x4d: // E-cache asynchronous fault address
    case 0x4e: // E-cache tag data
    case 0x66: // I-cache instruction access
    case 0x67: // I-cache tag access
    case 0x6e: // I-cache predecode
    case 0x6f: // I-cache LRU etc.
    case 0x76: // E-cache tag
    case 0x7e: // E-cache tag
        break;
    case 0x59: // D-MMU 8k TSB pointer
    case 0x5a: // D-MMU 64k TSB pointer
    case 0x5b: // D-MMU data pointer
    case 0x48: // Interrupt dispatch, RO
    case 0x49: // Interrupt data receive
    case 0x7f: // Incoming interrupt vector, RO
        // XXX
        break;
    case 0x54: // I-MMU data in, WO
    case 0x57: // I-MMU demap, WO
    case 0x5c: // D-MMU data in, WO
    case 0x5f: // D-MMU demap, WO
    case 0x77: // Interrupt vector, WO
    default:
        do_unassigned_access(addr, 0, 0, 1);
        ret = 0;
        break;
    }

    /* Convert from little endian */
    switch (asi) {
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0x8a: // Primary no-fault LE
    case 0x8b: // Secondary no-fault LE
        switch(size) {
        case 2:
            ret = bswap16(ret);
            break;
        case 4:
            ret = bswap32(ret);
            break;
        case 8:
            ret = bswap64(ret);
            break;
        default:
            break;
        }
    default:
        break;
    }

    /* Convert to signed number */
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
{
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || ((env->def->features & CPU_FEATURE_HYPV)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    /* Convert to little endian */
    switch (asi) {
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
        switch(size) {
        case 2:
            addr = bswap16(addr);
            break;
        case 4:
            addr = bswap32(addr);
            break;
        case 8:
            addr = bswap64(addr);
            break;
        default:
            break;
        }
    default:
        break;
    }

    switch(asi) {
    case 0x10: // As if user primary
    case 0x18: // As if user primary LE
    case 0x80: // Primary
    case 0x88: // Primary LE
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
            if ((env->def->features & CPU_FEATURE_HYPV)
                && env->hpstate & HS_PRIV) {
                switch(size) {
                case 1:
                    stb_hypv(addr, val);
                    break;
                case 2:
                    stw_hypv(addr, val);
                    break;
                case 4:
                    stl_hypv(addr, val);
                    break;
                case 8:
                default:
                    stq_hypv(addr, val);
                    break;
                }
            } else {
                switch(size) {
                case 1:
                    stb_kernel(addr, val);
                    break;
                case 2:
                    stw_kernel(addr, val);
                    break;
                case 4:
                    stl_kernel(addr, val);
                    break;
                case 8:
                default:
                    stq_kernel(addr, val);
                    break;
                }
            }
        } else {
            switch(size) {
            case 1:
                stb_user(addr, val);
                break;
            case 2:
                stw_user(addr, val);
                break;
            case 4:
                stl_user(addr, val);
                break;
            case 8:
            default:
                stq_user(addr, val);
                break;
            }
        }
        break;
    case 0x14: // Bypass
    case 0x15: // Bypass, non-cacheable
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
        switch(size) {
        case 1:
            stb_phys(addr, val);
            break;
        case 2:
            stw_phys(addr, val);
            break;
        case 4:
            stl_phys(addr, val);
            break;
        case 8:
        default:
            stq_phys(addr, val);
            break;
        }
        return;
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        //  Only ldda allowed
        raise_exception(TT_ILL_INSN);
        return;
    case 0x04: // Nucleus
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x11: // As if user secondary
    case 0x19: // As if user secondary LE
    case 0x4a: // UPA config
    case 0x81: // Secondary
    case 0x89: // Secondary LE
        // XXX
        return;
    case 0x45: // LSU
        {
            uint64_t oldreg;

            oldreg = env->lsu;
            env->lsu = val & (DMMU_E | IMMU_E);
            // Mappings generated during D/I MMU disabled mode are
            // invalid in normal mode
            if (oldreg != env->lsu) {
                DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
                            oldreg, env->lsu);
                tlb_flush(env, 1);
            }
            return;
        }
    case 0x50: // I-MMU regs
        {
            int reg = (addr >> 3) & 0xf;
            uint64_t oldreg;

            oldreg = env->immuregs[reg];
            switch(reg) {
            case 0: // RO
                return;
            case 1: // Not in I-MMU
            case 2:
            case 7:
            case 8:
                return;
            case 3: // SFSR
                if ((val & 1) == 0)
                    val = 0; // Clear SFSR
                break;
            case 5: // TSB access
            case 6: // Tag access
            default:
                break;
            }
            env->immuregs[reg] = val;
            if (oldreg != env->immuregs[reg]) {
                DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
                            PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
            }
            return;
        }
    case 0x54: // I-MMU data in
        {
            unsigned int i;

            // Try finding an invalid entry
            for (i = 0; i < 64; i++) {
                if ((env->itlb_tte[i] & 0x8000000000000000ULL) == 0) {
                    env->itlb_tag[i] = env->immuregs[6];
                    env->itlb_tte[i] = val;
                    return;
                }
            }
            // Try finding an unlocked entry
            for (i = 0; i < 64; i++) {
                if ((env->itlb_tte[i] & 0x40) == 0) {
                    env->itlb_tag[i] = env->immuregs[6];
                    env->itlb_tte[i] = val;
                    return;
                }
            }
            return;
        }
    case 0x55: // I-MMU data access
        {
            unsigned int i = (addr >> 3) & 0x3f;

            env->itlb_tag[i] = env->immuregs[6];
            env->itlb_tte[i] = val;
            return;
        }
    case 0x57: // I-MMU demap
        // XXX
        return;
    case 0x58: // D-MMU regs
        {
            int reg = (addr >> 3) & 0xf;
            uint64_t oldreg;

            oldreg = env->dmmuregs[reg];
            switch(reg) {
            case 0: // RO
            case 4:
                return;
            case 3: // SFSR
                if ((val & 1) == 0) {
                    val = 0; // Clear SFSR, Fault address
                    env->dmmuregs[4] = 0;
                }
                env->dmmuregs[reg] = val;
                break;
            case 1: // Primary context
            case 2: // Secondary context
            case 5: // TSB access
            case 6: // Tag access
            case 7: // Virtual Watchpoint
            case 8: // Physical Watchpoint
            default:
                break;
            }
            env->dmmuregs[reg] = val;
            if (oldreg != env->dmmuregs[reg]) {
                DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
                            PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
            }
            return;
        }
    case 0x5c: // D-MMU data in
        {
            unsigned int i;

            // Try finding an invalid entry
            for (i = 0; i < 64; i++) {
                if ((env->dtlb_tte[i] & 0x8000000000000000ULL) == 0) {
                    env->dtlb_tag[i] = env->dmmuregs[6];
                    env->dtlb_tte[i] = val;
                    return;
                }
            }
            // Try finding an unlocked entry
            for (i = 0; i < 64; i++) {
                if ((env->dtlb_tte[i] & 0x40) == 0) {
                    env->dtlb_tag[i] = env->dmmuregs[6];
                    env->dtlb_tte[i] = val;
                    return;
                }
            }
            return;
        }
    case 0x5d: // D-MMU data access
        {
            unsigned int i = (addr >> 3) & 0x3f;

            env->dtlb_tag[i] = env->dmmuregs[6];
            env->dtlb_tte[i] = val;
            return;
        }
    case 0x5f: // D-MMU demap
    case 0x49: // Interrupt data receive
        // XXX
        return;
    case 0x46: // D-cache data
    case 0x47: // D-cache tag access
    case 0x4b: // E-cache error enable
    case 0x4c: // E-cache asynchronous fault status
    case 0x4d: // E-cache asynchronous fault address
    case 0x4e: // E-cache tag data
    case 0x66: // I-cache instruction access
    case 0x67: // I-cache tag access
    case 0x6e: // I-cache predecode
    case 0x6f: // I-cache LRU etc.
    case 0x76: // E-cache tag
    case 0x7e: // E-cache tag
        return;
    case 0x51: // I-MMU 8k TSB pointer, RO
    case 0x52: // I-MMU 64k TSB pointer, RO
    case 0x56: // I-MMU tag read, RO
    case 0x59: // D-MMU 8k TSB pointer, RO
    case 0x5a: // D-MMU 64k TSB pointer, RO
    case 0x5b: // D-MMU data pointer, RO
    case 0x5e: // D-MMU tag read, RO
    case 0x48: // Interrupt dispatch, RO
    case 0x7f: // Incoming interrupt vector, RO
    case 0x82: // Primary no-fault, RO
    case 0x83: // Secondary no-fault, RO
    case 0x8a: // Primary no-fault LE, RO
    case 0x8b: // Secondary no-fault LE, RO
    default:
        do_unassigned_access(addr, 1, 0, 1);
        return;
    }
}
#endif /* CONFIG_USER_ONLY */
void helper_ldda_asi(target_ulong addr, int asi, int rd)
{
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || ((env->def->features & CPU_FEATURE_HYPV)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    switch (asi) {
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        helper_check_align(addr, 0xf);
        if (rd == 0) {
            env->gregs[1] = ldq_kernel(addr + 8);
            if (asi == 0x2c)
                bswap64s(&env->gregs[1]);
        } else if (rd < 8) {
            env->gregs[rd] = ldq_kernel(addr);
            env->gregs[rd + 1] = ldq_kernel(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->gregs[rd]);
                bswap64s(&env->gregs[rd + 1]);
            }
        } else {
            env->regwptr[rd] = ldq_kernel(addr);
            env->regwptr[rd + 1] = ldq_kernel(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->regwptr[rd]);
                bswap64s(&env->regwptr[rd + 1]);
            }
        }
        break;
    default:
        helper_check_align(addr, 0x3);
        if (rd == 0)
            env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
        else if (rd < 8) {
            env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        } else {
            env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        }
        break;
    }
}
void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val;

    helper_check_align(addr, 3);
    switch (asi) {
    case 0xf0: // Block load primary
    case 0xf1: // Block load secondary
    case 0xf8: // Block load primary LE
    case 0xf9: // Block load secondary LE
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        for (i = 0; i < 16; i++) {
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
                                                         0);
            addr += 4;
        }

        return;
    default:
        break;
    }

    val = helper_ld_asi(addr, asi, size, 0);
    switch(size) {
    default:
    case 4:
        *((uint32_t *)&env->fpr[rd]) = val;
        break;
    case 8:
        *((int64_t *)&DT0) = val;
        break;
    }
}
void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val = 0;

    helper_check_align(addr, 3);
    switch (asi) {
    case 0xf0: // Block store primary
    case 0xf1: // Block store secondary
    case 0xf8: // Block store primary LE
    case 0xf9: // Block store secondary LE
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        for (i = 0; i < 16; i++) {
            val = *(uint32_t *)&env->fpr[rd++];
            helper_st_asi(addr, val, asi & 0x8f, 4);
            addr += 4;
        }

        return;
    default:
        break;
    }

    switch(size) {
    default:
    case 4:
        val = *((uint32_t *)&env->fpr[rd]);
        break;
    case 8:
        val = *((int64_t *)&DT0);
        break;
    }
    helper_st_asi(addr, val, asi, size);
}
target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
                            target_ulong val2, uint32_t asi)
{
    target_ulong ret;

    val1 &= 0xffffffffUL;
    ret = helper_ld_asi(addr, asi, 4, 0);
    ret &= 0xffffffffUL;
    if (val1 == ret)
        helper_st_asi(addr, val2 & 0xffffffffUL, asi, 4);
    return ret;
}

target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
                             target_ulong val2, uint32_t asi)
{
    target_ulong ret;

    ret = helper_ld_asi(addr, asi, 8, 0);
    if (val1 == ret)
        helper_st_asi(addr, val2, asi, 8);
    return ret;
}
#endif /* TARGET_SPARC64 */
#ifndef TARGET_SPARC64
void helper_rett(void)
{
    unsigned int cwp;

    if (env->psret == 1)
        raise_exception(TT_ILL_INSN);

    env->psret = 1;
    cwp = cpu_cwp_inc(env, env->cwp + 1) ;
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_UNF);
    }
    set_cwp(cwp);
    env->psrs = env->psrps;
}
#endif
helper_udiv(target_ulong a
, target_ulong b
)
2245 x0
= (a
& 0xffffffff) | ((int64_t) (env
->y
) << 32);
2249 raise_exception(TT_DIV_ZERO
);
2253 if (x0
> 0xffffffff) {
2262 target_ulong
helper_sdiv(target_ulong a
, target_ulong b
)
2267 x0
= (a
& 0xffffffff) | ((int64_t) (env
->y
) << 32);
2271 raise_exception(TT_DIV_ZERO
);
2275 if ((int32_t) x0
!= x0
) {
2277 return x0
< 0? 0x80000000: 0x7fffffff;
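
/* Both division helpers build a 64-bit dividend from the 32-bit operand and
   the Y register (Y holds the upper 32 bits), as the SPARC udiv/sdiv
   instructions require; out-of-range quotients saturate. */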
uint64_t helper_pack64(target_ulong high, target_ulong low)
{
    return ((uint64_t)high << 32) | (uint64_t)(low & 0xffffffff);
}
void helper_stdf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        stfq_user(addr, DT0);
        break;
    case 1:
        stfq_kernel(addr, DT0);
        break;
#ifdef TARGET_SPARC64
    case 2:
        stfq_hypv(addr, DT0);
        break;
#endif
    default:
        break;
    }
#else
    address_mask(env, &addr);
    stfq_raw(addr, DT0);
#endif
}

void helper_lddf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        DT0 = ldfq_user(addr);
        break;
    case 1:
        DT0 = ldfq_kernel(addr);
        break;
#ifdef TARGET_SPARC64
    case 2:
        DT0 = ldfq_hypv(addr);
        break;
#endif
    default:
        break;
    }
#else
    address_mask(env, &addr);
    DT0 = ldfq_raw(addr);
#endif
}

void helper_ldqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit load
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        u.ll.upper = ldq_user(addr);
        u.ll.lower = ldq_user(addr + 8);
        QT0 = u.q;
        break;
    case 1:
        u.ll.upper = ldq_kernel(addr);
        u.ll.lower = ldq_kernel(addr + 8);
        QT0 = u.q;
        break;
#ifdef TARGET_SPARC64
    case 2:
        u.ll.upper = ldq_hypv(addr);
        u.ll.lower = ldq_hypv(addr + 8);
        QT0 = u.q;
        break;
#endif
    default:
        break;
    }
#else
    address_mask(env, &addr);
    u.ll.upper = ldq_raw(addr);
    u.ll.lower = ldq_raw((addr + 8) & 0xffffffffULL);
    QT0 = u.q;
#endif
}

void helper_stqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit store
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        u.q = QT0;
        stq_user(addr, u.ll.upper);
        stq_user(addr + 8, u.ll.lower);
        break;
    case 1:
        u.q = QT0;
        stq_kernel(addr, u.ll.upper);
        stq_kernel(addr + 8, u.ll.lower);
        break;
#ifdef TARGET_SPARC64
    case 2:
        u.q = QT0;
        stq_hypv(addr, u.ll.upper);
        stq_hypv(addr + 8, u.ll.lower);
        break;
#endif
    default:
        break;
    }
#else
    u.q = QT0;
    address_mask(env, &addr);
    stq_raw(addr, u.ll.upper);
    stq_raw((addr + 8) & 0xffffffffULL, u.ll.lower);
#endif
}
static inline void set_fsr(void)
{
    int rnd_mode;

    switch (env->fsr & FSR_RD_MASK) {
    case FSR_RD_NEAREST:
        rnd_mode = float_round_nearest_even;
        break;
    default:
    case FSR_RD_ZERO:
        rnd_mode = float_round_to_zero;
        break;
    case FSR_RD_POS:
        rnd_mode = float_round_up;
        break;
    case FSR_RD_NEG:
        rnd_mode = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_mode, &env->fp_status);
}

void helper_ldfsr(uint32_t new_fsr)
{
    env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK);
    set_fsr();
}

#ifdef TARGET_SPARC64
void helper_ldxfsr(uint64_t new_fsr)
{
    env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK);
    set_fsr();
}
#endif

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}
#ifndef TARGET_SPARC64
/* XXX: use another pointer for %iN registers to avoid slow wrapping
   handling ? */
void helper_save(void)
{
    uint32_t cwp;

    cwp = cpu_cwp_dec(env, env->cwp - 1);
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_OVF);
    }
    set_cwp(cwp);
}

void helper_restore(void)
{
    uint32_t cwp;

    cwp = cpu_cwp_inc(env, env->cwp + 1);
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_UNF);
    }
    set_cwp(cwp);
}

void helper_wrpsr(target_ulong new_psr)
{
    if ((new_psr & PSR_CWP) >= env->nwindows)
        raise_exception(TT_ILL_INSN);
    else
        PUT_PSR(env, new_psr);
}

target_ulong helper_rdpsr(void)
{
    return GET_PSR(env);
}
#else
/* XXX: use another pointer for %iN registers to avoid slow wrapping
   handling ? */
void helper_save(void)
{
    uint32_t cwp;

    cwp = cpu_cwp_dec(env, env->cwp - 1);
    if (env->cansave == 0) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    } else {
        if (env->cleanwin - env->canrestore == 0) {
            // XXX Clean windows without trap
            raise_exception(TT_CLRWIN);
        } else {
            env->cansave--;
            env->canrestore++;
            set_cwp(cwp);
        }
    }
}

void helper_restore(void)
{
    uint32_t cwp;

    cwp = cpu_cwp_inc(env, env->cwp + 1);
    if (env->canrestore == 0) {
        raise_exception(TT_FILL | (env->otherwin != 0 ?
                                   (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                   ((env->wstate & 0x7) << 2)));
    } else {
        env->cansave++;
        env->canrestore--;
        set_cwp(cwp);
    }
}

void helper_flushw(void)
{
    if (env->cansave != env->nwindows - 2) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    }
}

void helper_saved(void)
{
    env->cansave++;
    if (env->otherwin == 0)
        env->canrestore--;
    else
        env->otherwin--;
}

void helper_restored(void)
{
    env->canrestore++;
    if (env->cleanwin < env->nwindows - 1)
        env->cleanwin++;
    if (env->otherwin == 0)
        env->cansave--;
    else
        env->otherwin--;
}
target_ulong helper_rdccr(void)
{
    return GET_CCR(env);
}

void helper_wrccr(target_ulong new_ccr)
{
    PUT_CCR(env, new_ccr);
}

// CWP handling is reversed in V9, but we still use the V8 register
// window scheme
target_ulong helper_rdcwp(void)
{
    return GET_CWP64(env);
}

void helper_wrcwp(target_ulong new_cwp)
{
    PUT_CWP64(env, new_cwp);
}

// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 63 - (TO), 63 - (FROM))
target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
{
    return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
        (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
        (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
        (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
        (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
        (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
        (((pixel_addr >> 55) & 1) << 4) |
        (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
        GET_FIELD_SP(pixel_addr, 11, 12);
}
target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
{
    uint64_t tmp;

    tmp = addr + offset;
    env->gsr &= ~7ULL;
    env->gsr |= tmp & 7ULL;
    return tmp & ~7ULL;
}

target_ulong helper_popc(target_ulong val)
{
    return ctpop64(val);
}
)
2633 static inline void change_pstate(uint64_t new_pstate
)
2635 uint64_t pstate_regs
, new_pstate_regs
;
2636 uint64_t *src
, *dst
;
2638 pstate_regs
= env
->pstate
& 0xc01;
2639 new_pstate_regs
= new_pstate
& 0xc01;
2640 if (new_pstate_regs
!= pstate_regs
) {
2641 // Switch global register bank
2642 src
= get_gregset(new_pstate_regs
);
2643 dst
= get_gregset(pstate_regs
);
2644 memcpy32(dst
, env
->gregs
);
2645 memcpy32(env
->gregs
, src
);
2647 env
->pstate
= new_pstate
;
2650 void helper_wrpstate(target_ulong new_state
)
2652 if (!(env
->def
->features
& CPU_FEATURE_GL
))
2653 change_pstate(new_state
& 0xf3f);
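
/* The 0xc01 mask above isolates the PSTATE AG, IG and MG bits, i.e. the
   fields that select which bank of global registers (normal, alternate,
   interrupt or MMU globals) change_pstate() maps into env->gregs. */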
void helper_done(void)
{
    env->pc = env->tsptr->tpc;
    env->npc = env->tsptr->tnpc + 4;
    PUT_CCR(env, env->tsptr->tstate >> 32);
    env->asi = (env->tsptr->tstate >> 24) & 0xff;
    change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
    PUT_CWP64(env, env->tsptr->tstate & 0xff);
    env->tl--;
    env->tsptr = &env->ts[env->tl & MAXTL_MASK];
}

void helper_retry(void)
{
    env->pc = env->tsptr->tpc;
    env->npc = env->tsptr->tnpc;
    PUT_CCR(env, env->tsptr->tstate >> 32);
    env->asi = (env->tsptr->tstate >> 24) & 0xff;
    change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
    PUT_CWP64(env, env->tsptr->tstate & 0xff);
    env->tl--;
    env->tsptr = &env->ts[env->tl & MAXTL_MASK];
}
#endif

void helper_flush(target_ulong addr)
{
    addr &= ~7;
    tb_invalidate_page_range(addr, addr + 8);
}
#ifdef TARGET_SPARC64
static const char * const excp_names[0x80] = {
    [TT_TFAULT] = "Instruction Access Fault",
    [TT_TMISS] = "Instruction Access MMU Miss",
    [TT_CODE_ACCESS] = "Instruction Access Error",
    [TT_ILL_INSN] = "Illegal Instruction",
    [TT_PRIV_INSN] = "Privileged Instruction",
    [TT_NFPU_INSN] = "FPU Disabled",
    [TT_FP_EXCP] = "FPU Exception",
    [TT_TOVF] = "Tag Overflow",
    [TT_CLRWIN] = "Clean Windows",
    [TT_DIV_ZERO] = "Division By Zero",
    [TT_DFAULT] = "Data Access Fault",
    [TT_DMISS] = "Data Access MMU Miss",
    [TT_DATA_ACCESS] = "Data Access Error",
    [TT_DPROT] = "Data Protection Error",
    [TT_UNALIGNED] = "Unaligned Memory Access",
    [TT_PRIV_ACT] = "Privileged Action",
    [TT_EXTINT | 0x1] = "External Interrupt 1",
    [TT_EXTINT | 0x2] = "External Interrupt 2",
    [TT_EXTINT | 0x3] = "External Interrupt 3",
    [TT_EXTINT | 0x4] = "External Interrupt 4",
    [TT_EXTINT | 0x5] = "External Interrupt 5",
    [TT_EXTINT | 0x6] = "External Interrupt 6",
    [TT_EXTINT | 0x7] = "External Interrupt 7",
    [TT_EXTINT | 0x8] = "External Interrupt 8",
    [TT_EXTINT | 0x9] = "External Interrupt 9",
    [TT_EXTINT | 0xa] = "External Interrupt 10",
    [TT_EXTINT | 0xb] = "External Interrupt 11",
    [TT_EXTINT | 0xc] = "External Interrupt 12",
    [TT_EXTINT | 0xd] = "External Interrupt 13",
    [TT_EXTINT | 0xe] = "External Interrupt 14",
    [TT_EXTINT | 0xf] = "External Interrupt 15",
};
void do_interrupt(CPUState *env)
{
    int intno = env->exception_index;

    if (loglevel & CPU_LOG_INT) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x180)
            name = "Unknown";
        else if (intno >= 0x100)
            name = "Trap Instruction";
        else if (intno >= 0xc0)
            name = "Window Fill";
        else if (intno >= 0x80)
            name = "Window Spill";
        else {
            name = excp_names[intno];
            if (!name)
                name = "Unknown";
        }

        fprintf(logfile, "%6d: %s (v=%04x) pc=%016" PRIx64 " npc=%016" PRIx64
                " SP=%016" PRIx64 "\n",
                count, name, intno,
                env->pc,
                env->npc, env->regwptr[6]);
        cpu_dump_state(env, logfile, fprintf, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            fprintf(logfile, "       code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                fprintf(logfile, " %02x", ldub(ptr + i));
            }
            fprintf(logfile, "\n");
        }
#endif
        count++;
    }
#if !defined(CONFIG_USER_ONLY)
    if (env->tl >= env->maxtl) {
        cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
                  " Error state", env->exception_index, env->tl, env->maxtl);
        return;
    }
#endif
    if (env->tl < env->maxtl - 1) {
        env->tl++;
    } else {
        env->pstate |= PS_RED;
        if (env->tl < env->maxtl)
            env->tl++;
    }
    env->tsptr = &env->ts[env->tl & MAXTL_MASK];
    env->tsptr->tstate = ((uint64_t)GET_CCR(env) << 32) |
        ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
        GET_CWP64(env);
    env->tsptr->tpc = env->pc;
    env->tsptr->tnpc = env->npc;
    env->tsptr->tt = intno;
    if (!(env->def->features & CPU_FEATURE_GL)) {
        switch (intno) {
        case TT_IVEC:
            change_pstate(PS_PEF | PS_PRIV | PS_IG);
            break;
        case TT_TFAULT:
        case TT_TMISS:
        case TT_DFAULT:
        case TT_DMISS:
        case TT_DPROT:
            change_pstate(PS_PEF | PS_PRIV | PS_MG);
            break;
        default:
            change_pstate(PS_PEF | PS_PRIV | PS_AG);
            break;
        }
    }
    if (intno == TT_CLRWIN)
        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1));
    else if ((intno & 0x1c0) == TT_SPILL)
        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2));
    else if ((intno & 0x1c0) == TT_FILL)
        cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1));
    env->tbr &= ~0x7fffULL;
    env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = 0;
}
static const char * const excp_names[0x80] = {
    [TT_TFAULT] = "Instruction Access Fault",
    [TT_ILL_INSN] = "Illegal Instruction",
    [TT_PRIV_INSN] = "Privileged Instruction",
    [TT_NFPU_INSN] = "FPU Disabled",
    [TT_WIN_OVF] = "Window Overflow",
    [TT_WIN_UNF] = "Window Underflow",
    [TT_UNALIGNED] = "Unaligned Memory Access",
    [TT_FP_EXCP] = "FPU Exception",
    [TT_DFAULT] = "Data Access Fault",
    [TT_TOVF] = "Tag Overflow",
    [TT_EXTINT | 0x1] = "External Interrupt 1",
    [TT_EXTINT | 0x2] = "External Interrupt 2",
    [TT_EXTINT | 0x3] = "External Interrupt 3",
    [TT_EXTINT | 0x4] = "External Interrupt 4",
    [TT_EXTINT | 0x5] = "External Interrupt 5",
    [TT_EXTINT | 0x6] = "External Interrupt 6",
    [TT_EXTINT | 0x7] = "External Interrupt 7",
    [TT_EXTINT | 0x8] = "External Interrupt 8",
    [TT_EXTINT | 0x9] = "External Interrupt 9",
    [TT_EXTINT | 0xa] = "External Interrupt 10",
    [TT_EXTINT | 0xb] = "External Interrupt 11",
    [TT_EXTINT | 0xc] = "External Interrupt 12",
    [TT_EXTINT | 0xd] = "External Interrupt 13",
    [TT_EXTINT | 0xe] = "External Interrupt 14",
    [TT_EXTINT | 0xf] = "External Interrupt 15",
    [TT_TOVF] = "Tag Overflow",
    [TT_CODE_ACCESS] = "Instruction Access Error",
    [TT_DATA_ACCESS] = "Data Access Error",
    [TT_DIV_ZERO] = "Division By Zero",
    [TT_NCP_INSN] = "Coprocessor Disabled",
};
void do_interrupt(CPUState *env)
{
    int cwp, intno = env->exception_index;

    if (loglevel & CPU_LOG_INT) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x100)
            name = "Unknown";
        else if (intno >= 0x80)
            name = "Trap Instruction";
        else {
            name = excp_names[intno];
            if (!name)
                name = "Unknown";
        }

        fprintf(logfile, "%6d: %s (v=%02x) pc=%08x npc=%08x SP=%08x\n",
                count, name, intno,
                env->pc,
                env->npc, env->regwptr[6]);
        cpu_dump_state(env, logfile, fprintf, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            fprintf(logfile, "       code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                fprintf(logfile, " %02x", ldub(ptr + i));
            }
            fprintf(logfile, "\n");
        }
#endif
        count++;
    }
#if !defined(CONFIG_USER_ONLY)
    if (env->psret == 0) {
        cpu_abort(env, "Trap 0x%02x while interrupts disabled, Error state",
                  env->exception_index);
        return;
    }
#endif
    env->psret = 0;
    cwp = cpu_cwp_dec(env, env->cwp - 1);
    cpu_set_cwp(env, cwp);
    env->regwptr[9] = env->pc;
    env->regwptr[10] = env->npc;
    env->psrps = env->psrs;
    env->psrs = 1;
    env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = 0;
}
#endif
#if !defined(CONFIG_USER_ONLY)

static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr);

#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* XXX: make it generic ? */
static void cpu_restore_state2(void *retaddr)
{
    TranslationBlock *tb;
    unsigned long pc;

    if (retaddr) {
        /* now we have a real cpu fault */
        pc = (unsigned long)retaddr;
        tb = tb_find_pc(pc);
        if (tb) {
            /* the PC is inside the translated code. It means that we have
               a virtual CPU fault */
            cpu_restore_state(tb, env, pc, (void *)(long)env->cond);
        }
    }
}

static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr)
{
#ifdef DEBUG_UNALIGNED
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
           "\n", addr, env->pc);
#endif
    cpu_restore_state2(retaddr);
    raise_exception(TT_UNALIGNED);
}
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    int ret;
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        cpu_restore_state2(retaddr);
        cpu_loop_exit();
    }
    env = saved_env;
}
#ifndef TARGET_SPARC64
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi)
{
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
#ifdef DEBUG_UNASSIGNED
    if (is_asi)
        printf("Unassigned mem %s access to " TARGET_FMT_plx
               " asi 0x%02x from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", addr, is_asi,
               env->pc);
    else
        printf("Unassigned mem %s access to " TARGET_FMT_plx " from "
               TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", addr, env->pc);
#endif
    if (env->mmuregs[3]) /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    if (is_asi)
        env->mmuregs[3] |= 1 << 16;
    if (env->psrs)
        env->mmuregs[3] |= 1 << 5;
    if (is_exec)
        env->mmuregs[3] |= 1 << 6;
    if (is_write)
        env->mmuregs[3] |= 1 << 7;
    env->mmuregs[3] |= (5 << 2) | 2;
    env->mmuregs[4] = addr; /* Fault address register */
    if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
        if (is_exec)
            raise_exception(TT_CODE_ACCESS);
        else
            raise_exception(TT_DATA_ACCESS);
    }
    env = saved_env;
}
#else
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi)
{
#ifdef DEBUG_UNASSIGNED
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
           "\n", addr, env->pc);
    env = saved_env;
#endif
    if (is_exec)
        raise_exception(TT_CODE_ACCESS);
    else
        raise_exception(TT_DATA_ACCESS);
}