#include "host-utils.h"
#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

//#define DEBUG_UNALIGNED
//#define DEBUG_UNASSIGNED

#ifdef DEBUG_MMU
#define DPRINTF_MMU(fmt, args...) \
do { printf("MMU: " fmt , ##args); } while (0)
#else
#define DPRINTF_MMU(fmt, args...) do {} while (0)
#endif

#ifdef DEBUG_MXCC
#define DPRINTF_MXCC(fmt, args...) \
do { printf("MXCC: " fmt , ##args); } while (0)
#else
#define DPRINTF_MXCC(fmt, args...) do {} while (0)
#endif

#ifdef DEBUG_ASI
#define DPRINTF_ASI(fmt, args...) \
do { printf("ASI: " fmt , ##args); } while (0)
#endif

#ifdef TARGET_SPARC64
#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
#else
#define AM_CHECK(env1) (1)
#endif
static inline void address_mask(CPUState *env1, target_ulong *addr)
{
    if (AM_CHECK(env1))
        *addr &= 0xffffffffULL;
}
static void raise_exception(int tt)
{
    env->exception_index = tt;
    cpu_loop_exit();
}

void HELPER(raise_exception)(int tt)
{
    raise_exception(tt);
}

static inline void set_cwp(int new_cwp)
{
    cpu_set_cwp(env, new_cwp);
}
void helper_check_align(target_ulong addr, uint32_t align)
{
    if (addr & align) {
#ifdef DEBUG_UNALIGNED
        printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
               "\n", addr, env->pc);
#endif
        raise_exception(TT_UNALIGNED);
    }
}
#define F_HELPER(name, p) void helper_f##name##p(void)

#define F_BINOP(name) \
    float32 helper_f ## name ## s (float32 src1, float32 src2) \
    { \
        return float32_ ## name (src1, src2, &env->fp_status); \
    } \
    F_HELPER(name, d) \
    { \
        DT0 = float64_ ## name (DT0, DT1, &env->fp_status); \
    } \
    F_HELPER(name, q) \
    { \
        QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \
    }
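/* F_BINOP is presumably instantiated right below for the four basic
   arithmetic operations (e.g. F_BINOP(add), F_BINOP(sub), F_BINOP(mul),
   F_BINOP(div)); those expansions are not visible in this excerpt. */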
void helper_fsmuld(float32 src1, float32 src2)
{
    DT0 = float64_mul(float32_to_float64(src1, &env->fp_status),
                      float32_to_float64(src2, &env->fp_status),
                      &env->fp_status);
}

void helper_fdmulq(void)
{
    QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
                       float64_to_float128(DT1, &env->fp_status),
                       &env->fp_status);
}
float32 helper_fnegs(float32 src)
{
    return float32_chs(src);
}

#ifdef TARGET_SPARC64
void helper_fnegd(void)
{
    DT0 = float64_chs(DT1);
}

void helper_fnegq(void)
{
    QT0 = float128_chs(QT1);
}
#endif
/* Integer to float conversion.  */
float32 helper_fitos(int32_t src)
{
    return int32_to_float32(src, &env->fp_status);
}

void helper_fitod(int32_t src)
{
    DT0 = int32_to_float64(src, &env->fp_status);
}

void helper_fitoq(int32_t src)
{
    QT0 = int32_to_float128(src, &env->fp_status);
}
#ifdef TARGET_SPARC64
float32 helper_fxtos(void)
{
    return int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
}

void helper_fxtod(void)
{
    DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
}

void helper_fxtoq(void)
{
    QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
}
#endif
/* floating point conversion */
float32 helper_fdtos(void)
{
    return float64_to_float32(DT1, &env->fp_status);
}

void helper_fstod(float32 src)
{
    DT0 = float32_to_float64(src, &env->fp_status);
}

float32 helper_fqtos(void)
{
    return float128_to_float32(QT1, &env->fp_status);
}

void helper_fstoq(float32 src)
{
    QT0 = float32_to_float128(src, &env->fp_status);
}

void helper_fqtod(void)
{
    DT0 = float128_to_float64(QT1, &env->fp_status);
}

void helper_fdtoq(void)
{
    QT0 = float64_to_float128(DT1, &env->fp_status);
}
/* Float to integer conversion.  */
int32_t helper_fstoi(float32 src)
{
    return float32_to_int32_round_to_zero(src, &env->fp_status);
}

int32_t helper_fdtoi(void)
{
    return float64_to_int32_round_to_zero(DT1, &env->fp_status);
}

int32_t helper_fqtoi(void)
{
    return float128_to_int32_round_to_zero(QT1, &env->fp_status);
}
#ifdef TARGET_SPARC64
void helper_fstox(float32 src)
{
    *((int64_t *)&DT0) = float32_to_int64_round_to_zero(src, &env->fp_status);
}

void helper_fdtox(void)
{
    *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
}

void helper_fqtox(void)
{
    *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
}
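/* faligndata combines the two source doubles (DT0/DT1) into one result,
   shifting by the byte offset held in the low three bits of GSR, which
   helper_alignaddr (further below) is responsible for setting. */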
void helper_faligndata(void)
{
    uint64_t tmp;

    tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
    /* on many architectures a shift of 64 does nothing */
    if ((env->gsr & 7) != 0) {
        tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
    }
    *((uint64_t *)&DT0) = tmp;
}
#ifdef WORDS_BIGENDIAN
#define VIS_B64(n) b[7 - (n)]
#define VIS_W64(n) w[3 - (n)]
#define VIS_SW64(n) sw[3 - (n)]
#define VIS_L64(n) l[1 - (n)]
#define VIS_B32(n) b[3 - (n)]
#define VIS_W32(n) w[1 - (n)]
#else
#define VIS_B64(n) b[n]
#define VIS_W64(n) w[n]
#define VIS_SW64(n) sw[n]
#define VIS_L64(n) l[n]
#define VIS_B32(n) b[n]
#define VIS_W32(n) w[n]
#endif
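/* The accessors above are meant to index into the vis64/vis32 unions (a
   64/32-bit value overlaid with byte, word and signed-word arrays, not shown
   in this excerpt), so that lane 0 is always the least significant lane
   regardless of host byte order. */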
void helper_fpmerge(void)
{
    vis64 s, d;

    s.d = DT0;
    d.d = DT1;

    // Reverse calculation order to handle overlap
    d.VIS_B64(7) = s.VIS_B64(3);
    d.VIS_B64(6) = d.VIS_B64(3);
    d.VIS_B64(5) = s.VIS_B64(2);
    d.VIS_B64(4) = d.VIS_B64(2);
    d.VIS_B64(3) = s.VIS_B64(1);
    d.VIS_B64(2) = d.VIS_B64(1);
    d.VIS_B64(1) = s.VIS_B64(0);
    //d.VIS_B64(0) = d.VIS_B64(0);

    DT0 = d.d;
}
void helper_fmul8x16(void)
    tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \
    if ((tmp & 0xff) > 0x7f) \
    d.VIS_W64(r) = tmp >> 8;

void helper_fmul8x16al(void)
    tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \
    if ((tmp & 0xff) > 0x7f) \
    d.VIS_W64(r) = tmp >> 8;

void helper_fmul8x16au(void)
    tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \
    if ((tmp & 0xff) > 0x7f) \
    d.VIS_W64(r) = tmp >> 8;

void helper_fmul8sux16(void)
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
    if ((tmp & 0xff) > 0x7f) \
    d.VIS_W64(r) = tmp >> 8;

void helper_fmul8ulx16(void)
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
    if ((tmp & 0xff) > 0x7f) \
    d.VIS_W64(r) = tmp >> 8;

void helper_fmuld8sux16(void)
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
    if ((tmp & 0xff) > 0x7f) \
    // Reverse calculation order to handle overlap

void helper_fmuld8ulx16(void)
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
    if ((tmp & 0xff) > 0x7f) \
    // Reverse calculation order to handle overlap
void helper_fexpand(void)
{
    vis32 s;
    vis64 d;

    s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
    d.VIS_W64(0) = s.VIS_B32(0) << 4;
    d.VIS_W64(1) = s.VIS_B32(1) << 4;
    d.VIS_W64(2) = s.VIS_B32(2) << 4;
    d.VIS_W64(3) = s.VIS_B32(3) << 4;

    DT0 = d.d;
}
#define VIS_HELPER(name, F) \
    void name##16(void) \
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
        d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
        d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
        d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
    uint32_t name##16s(uint32_t src1, uint32_t src2) \
        d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
        d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
    void name##32(void) \
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
        d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
    uint32_t name##32s(uint32_t src1, uint32_t src2) \
#define FADD(a, b) ((a) + (b))
#define FSUB(a, b) ((a) - (b))
VIS_HELPER(helper_fpadd, FADD)
VIS_HELPER(helper_fpsub, FSUB)

#define VIS_CMPHELPER(name, F) \
    void name##16(void) \
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0; \
        d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0; \
        d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0; \
        d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0; \
    void name##32(void) \
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0; \
        d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0; \

#define FCMPGT(a, b) ((a) > (b))
#define FCMPEQ(a, b) ((a) == (b))
#define FCMPLE(a, b) ((a) <= (b))
#define FCMPNE(a, b) ((a) != (b))

VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
VIS_CMPHELPER(helper_fcmple, FCMPLE)
VIS_CMPHELPER(helper_fcmpne, FCMPNE)
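/* Unlike the add/sub helpers, the compare helpers above collapse the
   per-lane results into a small bitmask in the destination (bit 0 for lane
   0, bit 1 for lane 1, and so on), matching the VIS fcmp* instructions. */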
void helper_check_ieee_exceptions(void)
{
    target_ulong status;

    status = get_float_exception_flags(&env->fp_status);
    if (status) {
        /* Copy IEEE 754 flags into FSR */
        if (status & float_flag_invalid)
            env->fsr |= FSR_NVC;
        if (status & float_flag_overflow)
            env->fsr |= FSR_OFC;
        if (status & float_flag_underflow)
            env->fsr |= FSR_UFC;
        if (status & float_flag_divbyzero)
            env->fsr |= FSR_DZC;
        if (status & float_flag_inexact)
            env->fsr |= FSR_NXC;

        if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
            /* Unmasked exception, generate a trap */
            env->fsr |= FSR_FTT_IEEE_EXCP;
            raise_exception(TT_FP_EXCP);
        } else {
            /* Accumulate exceptions */
            env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
        }
    }
}
void helper_clear_float_exceptions(void)
{
    set_float_exception_flags(0, &env->fp_status);
}

float32 helper_fabss(float32 src)
{
    return float32_abs(src);
}

#ifdef TARGET_SPARC64
void helper_fabsd(void)
{
    DT0 = float64_abs(DT1);
}

void helper_fabsq(void)
{
    QT0 = float128_abs(QT1);
}
#endif

float32 helper_fsqrts(float32 src)
{
    return float32_sqrt(src, &env->fp_status);
}

void helper_fsqrtd(void)
{
    DT0 = float64_sqrt(DT1, &env->fp_status);
}

void helper_fsqrtq(void)
{
    QT0 = float128_sqrt(QT1, &env->fp_status);
}
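/* In the comparison macros below, FS is the bit offset used to place the
   2-bit condition code: 0 targets fcc0 in the FSR, while 22, 24 and 26 shift
   the FSR_FCC0/FSR_FCC1 bits up to the V9 fcc1/fcc2/fcc3 fields.  TRAP
   forces the unordered case to trap (the fcmpe* variants). */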
#define GEN_FCMP(name, size, reg1, reg2, FS, TRAP) \
    void glue(helper_, name) (void) \
        target_ulong new_fsr; \
        env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
        switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) { \
        case float_relation_unordered: \
            new_fsr = (FSR_FCC1 | FSR_FCC0) << FS; \
            if ((env->fsr & FSR_NVM) || TRAP) { \
                env->fsr |= new_fsr; \
                env->fsr |= FSR_NVC; \
                env->fsr |= FSR_FTT_IEEE_EXCP; \
                raise_exception(TT_FP_EXCP); \
            env->fsr |= FSR_NVA; \
        case float_relation_less: \
            new_fsr = FSR_FCC0 << FS; \
        case float_relation_greater: \
            new_fsr = FSR_FCC1 << FS; \
        env->fsr |= new_fsr; \

#define GEN_FCMPS(name, size, FS, TRAP) \
    void glue(helper_, name)(float32 src1, float32 src2) \
        target_ulong new_fsr; \
        env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
        switch (glue(size, _compare) (src1, src2, &env->fp_status)) { \
        case float_relation_unordered: \
            new_fsr = (FSR_FCC1 | FSR_FCC0) << FS; \
            if ((env->fsr & FSR_NVM) || TRAP) { \
                env->fsr |= new_fsr; \
                env->fsr |= FSR_NVC; \
                env->fsr |= FSR_FTT_IEEE_EXCP; \
                raise_exception(TT_FP_EXCP); \
            env->fsr |= FSR_NVA; \
        case float_relation_less: \
            new_fsr = FSR_FCC0 << FS; \
        case float_relation_greater: \
            new_fsr = FSR_FCC1 << FS; \
        env->fsr |= new_fsr; \
GEN_FCMPS(fcmps, float32, 0, 0);
GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);

GEN_FCMPS(fcmpes, float32, 0, 1);
GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);

GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);

#ifdef TARGET_SPARC64
GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);

GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);

GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);

GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);

GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);

GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
#endif
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
    defined(DEBUG_MXCC)
static void dump_mxcc(CPUState *env)
{
    printf("mxccdata: %016llx %016llx %016llx %016llx\n",
           env->mxccdata[0], env->mxccdata[1],
           env->mxccdata[2], env->mxccdata[3]);
    printf("mxccregs: %016llx %016llx %016llx %016llx\n"
           "          %016llx %016llx %016llx %016llx\n",
           env->mxccregs[0], env->mxccregs[1],
           env->mxccregs[2], env->mxccregs[3],
           env->mxccregs[4], env->mxccregs[5],
           env->mxccregs[6], env->mxccregs[7]);
}
#endif
#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
    && defined(DEBUG_ASI)
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
                     uint64_t r1)
{
    switch (size) {
    case 1:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xff);
        break;
    case 2:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffff);
        break;
    case 4:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffffffff);
        break;
    case 8:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
                    addr, asi, r1);
        break;
    }
}
#endif
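/* 32-bit SPARC: loads with an explicit ASI are dispatched on the ASI
   number - MXCC/MMU control registers, user vs. supervisor data space,
   and physical (MMU passthrough) accesses are all handled here. */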
#ifndef TARGET_SPARC64
#ifndef CONFIG_USER_ONLY
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
    uint32_t last_addr = addr;
#endif

    helper_check_align(addr, size - 1);
    switch (asi) {
    case 2: /* SuperSparc MXCC registers */
        switch (addr) {
        case 0x01c00a00: /* MXCC control register */
            if (size == 8)
                ret = env->mxccregs[3];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00a04: /* MXCC control register */
            if (size == 4)
                ret = env->mxccregs[3];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00c00: /* Module reset register */
            if (size == 8) {
                ret = env->mxccregs[5];
                // should we do something here?
            } else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00f00: /* MBus port address register */
            if (size == 8)
                ret = env->mxccregs[7];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        default:
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
                         size);
            break;
        }
        DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
                     "addr = %08x -> ret = %" PRIx64 ","
                     "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
        break;
    case 3: /* MMU probe */
        {
            int mmulev;

            mmulev = (addr >> 8) & 15;
            ret = mmu_probe(env, addr, mmulev);
            DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
                        addr, mmulev, ret);
        }
        break;
    case 4: /* read MMU regs */
        {
            int reg = (addr >> 8) & 0x1f;

            ret = env->mmuregs[reg];
            if (reg == 3) /* Fault status cleared on read */
                env->mmuregs[3] = 0;
            else if (reg == 0x13) /* Fault status read */
                ret = env->mmuregs[3];
            else if (reg == 0x14) /* Fault address read */
                ret = env->mmuregs[4];
            DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
        }
        break;
850 case 5: // Turbosparc ITLB Diagnostic
851 case 6: // Turbosparc DTLB Diagnostic
852 case 7: // Turbosparc IOTLB Diagnostic
854 case 9: /* Supervisor code access */
857 ret
= ldub_code(addr
);
860 ret
= lduw_code(addr
);
864 ret
= ldl_code(addr
);
867 ret
= ldq_code(addr
);
871 case 0xa: /* User data access */
874 ret
= ldub_user(addr
);
877 ret
= lduw_user(addr
);
881 ret
= ldl_user(addr
);
884 ret
= ldq_user(addr
);
888 case 0xb: /* Supervisor data access */
891 ret
= ldub_kernel(addr
);
894 ret
= lduw_kernel(addr
);
898 ret
= ldl_kernel(addr
);
901 ret
= ldq_kernel(addr
);
905 case 0xc: /* I-cache tag */
906 case 0xd: /* I-cache data */
907 case 0xe: /* D-cache tag */
908 case 0xf: /* D-cache data */
910 case 0x20: /* MMU passthrough */
913 ret
= ldub_phys(addr
);
916 ret
= lduw_phys(addr
);
920 ret
= ldl_phys(addr
);
923 ret
= ldq_phys(addr
);
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
        switch (size) {
        case 1:
            ret = ldub_phys((target_phys_addr_t)addr
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        case 2:
            ret = lduw_phys((target_phys_addr_t)addr
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        case 4:
            ret = ldl_phys((target_phys_addr_t)addr
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        case 8:
            ret = ldq_phys((target_phys_addr_t)addr
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        }
        break;
    case 0x30: // Turbosparc secondary cache diagnostic
    case 0x31: // Turbosparc RAM snoop
    case 0x32: // Turbosparc page table descriptor diagnostic
    case 0x39: /* data cache diagnostic register */
        break;
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
        {
            int reg = (addr >> 8) & 3;

            switch (reg) {
            case 0: /* Breakpoint Value (Addr) */
                ret = env->mmubpregs[reg];
                break;
            case 1: /* Breakpoint Mask */
                ret = env->mmubpregs[reg];
                break;
            case 2: /* Breakpoint Control */
                ret = env->mmubpregs[reg];
                break;
            case 3: /* Breakpoint Status */
                ret = env->mmubpregs[reg];
                env->mmubpregs[reg] = 0ULL;
                break;
            }
            DPRINTF_MMU("read breakpoint reg[%d] 0x%016llx\n", reg, ret);
        }
        break;
    case 8: /* User code access, XXX */
    default:
        do_unassigned_access(addr, 0, 0, asi, size);
        ret = 0;
        break;
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
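/* The store side mirrors helper_ld_asi: the same ASI numbers select MXCC
   registers, MMU register writes, user/supervisor data space, physical
   passthrough stores and the block copy/fill ASIs. */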
void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
{
    helper_check_align(addr, size - 1);
    switch (asi) {
    case 2: /* SuperSparc MXCC registers */
1009 case 0x01c00000: /* MXCC stream data register 0 */
1011 env
->mxccdata
[0] = val
;
1013 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1016 case 0x01c00008: /* MXCC stream data register 1 */
1018 env
->mxccdata
[1] = val
;
1020 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1023 case 0x01c00010: /* MXCC stream data register 2 */
1025 env
->mxccdata
[2] = val
;
1027 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1030 case 0x01c00018: /* MXCC stream data register 3 */
1032 env
->mxccdata
[3] = val
;
1034 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1037 case 0x01c00100: /* MXCC stream source */
1039 env
->mxccregs
[0] = val
;
1041 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1043 env
->mxccdata
[0] = ldq_phys((env
->mxccregs
[0] & 0xffffffffULL
) +
1045 env
->mxccdata
[1] = ldq_phys((env
->mxccregs
[0] & 0xffffffffULL
) +
1047 env
->mxccdata
[2] = ldq_phys((env
->mxccregs
[0] & 0xffffffffULL
) +
1049 env
->mxccdata
[3] = ldq_phys((env
->mxccregs
[0] & 0xffffffffULL
) +
1052 case 0x01c00200: /* MXCC stream destination */
1054 env
->mxccregs
[1] = val
;
1056 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1058 stq_phys((env
->mxccregs
[1] & 0xffffffffULL
) + 0,
1060 stq_phys((env
->mxccregs
[1] & 0xffffffffULL
) + 8,
1062 stq_phys((env
->mxccregs
[1] & 0xffffffffULL
) + 16,
1064 stq_phys((env
->mxccregs
[1] & 0xffffffffULL
) + 24,
1067 case 0x01c00a00: /* MXCC control register */
1069 env
->mxccregs
[3] = val
;
1071 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1074 case 0x01c00a04: /* MXCC control register */
1076 env
->mxccregs
[3] = (env
->mxccregs
[3] & 0xffffffff00000000ULL
)
1079 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1082 case 0x01c00e00: /* MXCC error register */
1083 // writing a 1 bit clears the error
1085 env
->mxccregs
[6] &= ~val
;
1087 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1090 case 0x01c00f00: /* MBus port address register */
1092 env
->mxccregs
[7] = val
;
1094 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1098 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr
,
1102 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64
"\n",
1103 asi
, size
, addr
, val
);
1108 case 3: /* MMU flush */
1112 mmulev
= (addr
>> 8) & 15;
1113 DPRINTF_MMU("mmu flush level %d\n", mmulev
);
1115 case 0: // flush page
1116 tlb_flush_page(env
, addr
& 0xfffff000);
1118 case 1: // flush segment (256k)
1119 case 2: // flush region (16M)
1120 case 3: // flush context (4G)
1121 case 4: // flush entire
1132 case 4: /* write MMU regs */
1134 int reg
= (addr
>> 8) & 0x1f;
1137 oldreg
= env
->mmuregs
[reg
];
1139 case 0: // Control Register
1140 env
->mmuregs
[reg
] = (env
->mmuregs
[reg
] & 0xff000000) |
1142 // Mappings generated during no-fault mode or MMU
1143 // disabled mode are invalid in normal mode
1144 if ((oldreg
& (MMU_E
| MMU_NF
| env
->def
->mmu_bm
)) !=
1145 (env
->mmuregs
[reg
] & (MMU_E
| MMU_NF
| env
->def
->mmu_bm
)))
1148 case 1: // Context Table Pointer Register
1149 env
->mmuregs
[reg
] = val
& env
->def
->mmu_ctpr_mask
;
1151 case 2: // Context Register
1152 env
->mmuregs
[reg
] = val
& env
->def
->mmu_cxr_mask
;
1153 if (oldreg
!= env
->mmuregs
[reg
]) {
1154 /* we flush when the MMU context changes because
1155 QEMU has no MMU context support */
1159 case 3: // Synchronous Fault Status Register with Clear
1160 case 4: // Synchronous Fault Address Register
1162 case 0x10: // TLB Replacement Control Register
1163 env
->mmuregs
[reg
] = val
& env
->def
->mmu_trcr_mask
;
1165 case 0x13: // Synchronous Fault Status Register with Read and Clear
1166 env
->mmuregs
[3] = val
& env
->def
->mmu_sfsr_mask
;
1168 case 0x14: // Synchronous Fault Address Register
1169 env
->mmuregs
[4] = val
;
1172 env
->mmuregs
[reg
] = val
;
1175 if (oldreg
!= env
->mmuregs
[reg
]) {
1176 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
1177 reg
, oldreg
, env
->mmuregs
[reg
]);
1184 case 5: // Turbosparc ITLB Diagnostic
1185 case 6: // Turbosparc DTLB Diagnostic
1186 case 7: // Turbosparc IOTLB Diagnostic
1188 case 0xa: /* User data access */
1191 stb_user(addr
, val
);
1194 stw_user(addr
, val
);
1198 stl_user(addr
, val
);
1201 stq_user(addr
, val
);
1205 case 0xb: /* Supervisor data access */
1208 stb_kernel(addr
, val
);
1211 stw_kernel(addr
, val
);
1215 stl_kernel(addr
, val
);
1218 stq_kernel(addr
, val
);
1222 case 0xc: /* I-cache tag */
1223 case 0xd: /* I-cache data */
1224 case 0xe: /* D-cache tag */
1225 case 0xf: /* D-cache data */
1226 case 0x10: /* I/D-cache flush page */
1227 case 0x11: /* I/D-cache flush segment */
1228 case 0x12: /* I/D-cache flush region */
1229 case 0x13: /* I/D-cache flush context */
1230 case 0x14: /* I/D-cache flush user */
    case 0x17: /* Block copy, sta access */
        {
            unsigned int i;
            uint32_t src = val & ~3, dst = addr & ~3, temp;

            for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
                temp = ldl_kernel(src);
                stl_kernel(dst, temp);
            }
        }
        break;
    case 0x1f: /* Block fill, stda access */
        {
            // fill 32 bytes with val
            unsigned int i;
            uint32_t dst = addr & 7;

            for (i = 0; i < 32; i += 8, dst += 8)
                stq_kernel(dst, val);
        }
        break;
1257 case 0x20: /* MMU passthrough */
1261 stb_phys(addr
, val
);
1264 stw_phys(addr
, val
);
1268 stl_phys(addr
, val
);
1271 stq_phys(addr
, val
);
1276 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1280 stb_phys((target_phys_addr_t
)addr
1281 | ((target_phys_addr_t
)(asi
& 0xf) << 32), val
);
1284 stw_phys((target_phys_addr_t
)addr
1285 | ((target_phys_addr_t
)(asi
& 0xf) << 32), val
);
1289 stl_phys((target_phys_addr_t
)addr
1290 | ((target_phys_addr_t
)(asi
& 0xf) << 32), val
);
1293 stq_phys((target_phys_addr_t
)addr
1294 | ((target_phys_addr_t
)(asi
& 0xf) << 32), val
);
1299 case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
1300 case 0x31: // store buffer data, Ross RT620 I-cache flush or
1301 // Turbosparc snoop RAM
1302 case 0x32: // store buffer control or Turbosparc page table
1303 // descriptor diagnostic
1304 case 0x36: /* I-cache flash clear */
1305 case 0x37: /* D-cache flash clear */
1306 case 0x4c: /* breakpoint action */
1308 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
1310 int reg
= (addr
>> 8) & 3;
1313 case 0: /* Breakpoint Value (Addr) */
1314 env
->mmubpregs
[reg
] = (val
& 0xfffffffffULL
);
1316 case 1: /* Breakpoint Mask */
1317 env
->mmubpregs
[reg
] = (val
& 0xfffffffffULL
);
1319 case 2: /* Breakpoint Control */
1320 env
->mmubpregs
[reg
] = (val
& 0x7fULL
);
1322 case 3: /* Breakpoint Status */
1323 env
->mmubpregs
[reg
] = (val
& 0xfULL
);
1326 DPRINTF_MMU("write breakpoint reg[%d] 0x%016llx\n", reg
,
1330 case 8: /* User code access, XXX */
1331 case 9: /* Supervisor code access, XXX */
1333 do_unassigned_access(addr
, 1, 0, asi
, size
);
1337 dump_asi("write", addr
, asi
, size
, val
);
1341 #endif /* CONFIG_USER_ONLY */
1342 #else /* TARGET_SPARC64 */
1344 #ifdef CONFIG_USER_ONLY
1345 uint64_t helper_ld_asi(target_ulong addr
, int asi
, int size
, int sign
)
1348 #if defined(DEBUG_ASI)
1349 target_ulong last_addr
= addr
;
1353 raise_exception(TT_PRIV_ACT
);
1355 helper_check_align(addr
, size
- 1);
1356 address_mask(env
, &addr
);
1359 case 0x82: // Primary no-fault
1360 case 0x8a: // Primary no-fault LE
1361 if (page_check_range(addr
, size
, PAGE_READ
) == -1) {
1363 dump_asi("read ", last_addr
, asi
, size
, ret
);
1368 case 0x80: // Primary
1369 case 0x88: // Primary LE
1373 ret
= ldub_raw(addr
);
1376 ret
= lduw_raw(addr
);
1379 ret
= ldl_raw(addr
);
1383 ret
= ldq_raw(addr
);
1388 case 0x83: // Secondary no-fault
1389 case 0x8b: // Secondary no-fault LE
1390 if (page_check_range(addr
, size
, PAGE_READ
) == -1) {
1392 dump_asi("read ", last_addr
, asi
, size
, ret
);
1397 case 0x81: // Secondary
1398 case 0x89: // Secondary LE
1405 /* Convert from little endian */
1407 case 0x88: // Primary LE
1408 case 0x89: // Secondary LE
1409 case 0x8a: // Primary no-fault LE
1410 case 0x8b: // Secondary no-fault LE
1428 /* Convert to signed number */
1435 ret
= (int16_t) ret
;
1438 ret
= (int32_t) ret
;
1445 dump_asi("read ", last_addr
, asi
, size
, ret
);
1450 void helper_st_asi(target_ulong addr
, target_ulong val
, int asi
, int size
)
1453 dump_asi("write", addr
, asi
, size
, val
);
1456 raise_exception(TT_PRIV_ACT
);
1458 helper_check_align(addr
, size
- 1);
1459 address_mask(env
, &addr
);
1461 /* Convert to little endian */
1463 case 0x88: // Primary LE
1464 case 0x89: // Secondary LE
1467 addr
= bswap16(addr
);
1470 addr
= bswap32(addr
);
1473 addr
= bswap64(addr
);
1483 case 0x80: // Primary
1484 case 0x88: // Primary LE
1503 case 0x81: // Secondary
1504 case 0x89: // Secondary LE
1508 case 0x82: // Primary no-fault, RO
1509 case 0x83: // Secondary no-fault, RO
1510 case 0x8a: // Primary no-fault LE, RO
1511 case 0x8b: // Secondary no-fault LE, RO
1513 do_unassigned_access(addr
, 1, 0, 1, size
);
1518 #else /* CONFIG_USER_ONLY */
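/* In the softmmu variants below, ASIs under 0x80 are privileged; with the
   hypervisor feature, the 0x30-0x7f range additionally requires
   hyperprivileged mode, so unprivileged code takes a privileged-action
   trap. */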
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
#endif

    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || ((env->def->features & CPU_FEATURE_HYPV)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
1535 case 0x82: // Primary no-fault
1536 case 0x8a: // Primary no-fault LE
1537 if (cpu_get_phys_page_debug(env
, addr
) == -1ULL) {
1539 dump_asi("read ", last_addr
, asi
, size
, ret
);
1544 case 0x10: // As if user primary
1545 case 0x18: // As if user primary LE
1546 case 0x80: // Primary
1547 case 0x88: // Primary LE
1548 case 0xe2: // UA2007 Primary block init
1549 case 0xe3: // UA2007 Secondary block init
1550 if ((asi
& 0x80) && (env
->pstate
& PS_PRIV
)) {
1551 if ((env
->def
->features
& CPU_FEATURE_HYPV
)
1552 && env
->hpstate
& HS_PRIV
) {
1555 ret
= ldub_hypv(addr
);
1558 ret
= lduw_hypv(addr
);
1561 ret
= ldl_hypv(addr
);
1565 ret
= ldq_hypv(addr
);
1571 ret
= ldub_kernel(addr
);
1574 ret
= lduw_kernel(addr
);
1577 ret
= ldl_kernel(addr
);
1581 ret
= ldq_kernel(addr
);
1588 ret
= ldub_user(addr
);
1591 ret
= lduw_user(addr
);
1594 ret
= ldl_user(addr
);
1598 ret
= ldq_user(addr
);
1603 case 0x14: // Bypass
1604 case 0x15: // Bypass, non-cacheable
1605 case 0x1c: // Bypass LE
1606 case 0x1d: // Bypass, non-cacheable LE
1610 ret
= ldub_phys(addr
);
1613 ret
= lduw_phys(addr
);
1616 ret
= ldl_phys(addr
);
1620 ret
= ldq_phys(addr
);
1625 case 0x24: // Nucleus quad LDD 128 bit atomic
1626 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
1627 // Only ldda allowed
1628 raise_exception(TT_ILL_INSN
);
1630 case 0x83: // Secondary no-fault
1631 case 0x8b: // Secondary no-fault LE
1632 if (cpu_get_phys_page_debug(env
, addr
) == -1ULL) {
1634 dump_asi("read ", last_addr
, asi
, size
, ret
);
1639 case 0x04: // Nucleus
1640 case 0x0c: // Nucleus Little Endian (LE)
1641 case 0x11: // As if user secondary
1642 case 0x19: // As if user secondary LE
1643 case 0x4a: // UPA config
1644 case 0x81: // Secondary
1645 case 0x89: // Secondary LE
1651 case 0x50: // I-MMU regs
1653 int reg
= (addr
>> 3) & 0xf;
1655 ret
= env
->immuregs
[reg
];
1658 case 0x51: // I-MMU 8k TSB pointer
1659 case 0x52: // I-MMU 64k TSB pointer
1662 case 0x55: // I-MMU data access
1664 int reg
= (addr
>> 3) & 0x3f;
1666 ret
= env
->itlb_tte
[reg
];
1669 case 0x56: // I-MMU tag read
1671 int reg
= (addr
>> 3) & 0x3f;
1673 ret
= env
->itlb_tag
[reg
];
1676 case 0x58: // D-MMU regs
1678 int reg
= (addr
>> 3) & 0xf;
1680 ret
= env
->dmmuregs
[reg
];
1683 case 0x5d: // D-MMU data access
1685 int reg
= (addr
>> 3) & 0x3f;
1687 ret
= env
->dtlb_tte
[reg
];
1690 case 0x5e: // D-MMU tag read
1692 int reg
= (addr
>> 3) & 0x3f;
1694 ret
= env
->dtlb_tag
[reg
];
1697 case 0x46: // D-cache data
1698 case 0x47: // D-cache tag access
1699 case 0x4b: // E-cache error enable
1700 case 0x4c: // E-cache asynchronous fault status
1701 case 0x4d: // E-cache asynchronous fault address
1702 case 0x4e: // E-cache tag data
1703 case 0x66: // I-cache instruction access
1704 case 0x67: // I-cache tag access
1705 case 0x6e: // I-cache predecode
1706 case 0x6f: // I-cache LRU etc.
1707 case 0x76: // E-cache tag
1708 case 0x7e: // E-cache tag
1710 case 0x59: // D-MMU 8k TSB pointer
1711 case 0x5a: // D-MMU 64k TSB pointer
1712 case 0x5b: // D-MMU data pointer
1713 case 0x48: // Interrupt dispatch, RO
1714 case 0x49: // Interrupt data receive
1715 case 0x7f: // Incoming interrupt vector, RO
1718 case 0x54: // I-MMU data in, WO
1719 case 0x57: // I-MMU demap, WO
1720 case 0x5c: // D-MMU data in, WO
1721 case 0x5f: // D-MMU demap, WO
1722 case 0x77: // Interrupt vector, WO
1724 do_unassigned_access(addr
, 0, 0, 1, size
);
1729 /* Convert from little endian */
1731 case 0x0c: // Nucleus Little Endian (LE)
1732 case 0x18: // As if user primary LE
1733 case 0x19: // As if user secondary LE
1734 case 0x1c: // Bypass LE
1735 case 0x1d: // Bypass, non-cacheable LE
1736 case 0x88: // Primary LE
1737 case 0x89: // Secondary LE
1738 case 0x8a: // Primary no-fault LE
1739 case 0x8b: // Secondary no-fault LE
1757 /* Convert to signed number */
1764 ret
= (int16_t) ret
;
1767 ret
= (int32_t) ret
;
1774 dump_asi("read ", last_addr
, asi
, size
, ret
);
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
{
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || ((env->def->features & CPU_FEATURE_HYPV)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
1791 /* Convert to little endian */
1793 case 0x0c: // Nucleus Little Endian (LE)
1794 case 0x18: // As if user primary LE
1795 case 0x19: // As if user secondary LE
1796 case 0x1c: // Bypass LE
1797 case 0x1d: // Bypass, non-cacheable LE
1798 case 0x88: // Primary LE
1799 case 0x89: // Secondary LE
1802 addr
= bswap16(addr
);
1805 addr
= bswap32(addr
);
1808 addr
= bswap64(addr
);
1818 case 0x10: // As if user primary
1819 case 0x18: // As if user primary LE
1820 case 0x80: // Primary
1821 case 0x88: // Primary LE
1822 case 0xe2: // UA2007 Primary block init
1823 case 0xe3: // UA2007 Secondary block init
1824 if ((asi
& 0x80) && (env
->pstate
& PS_PRIV
)) {
1825 if ((env
->def
->features
& CPU_FEATURE_HYPV
)
1826 && env
->hpstate
& HS_PRIV
) {
1829 stb_hypv(addr
, val
);
1832 stw_hypv(addr
, val
);
1835 stl_hypv(addr
, val
);
1839 stq_hypv(addr
, val
);
1845 stb_kernel(addr
, val
);
1848 stw_kernel(addr
, val
);
1851 stl_kernel(addr
, val
);
1855 stq_kernel(addr
, val
);
1862 stb_user(addr
, val
);
1865 stw_user(addr
, val
);
1868 stl_user(addr
, val
);
1872 stq_user(addr
, val
);
1877 case 0x14: // Bypass
1878 case 0x15: // Bypass, non-cacheable
1879 case 0x1c: // Bypass LE
1880 case 0x1d: // Bypass, non-cacheable LE
1884 stb_phys(addr
, val
);
1887 stw_phys(addr
, val
);
1890 stl_phys(addr
, val
);
1894 stq_phys(addr
, val
);
1899 case 0x24: // Nucleus quad LDD 128 bit atomic
1900 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
1901 // Only ldda allowed
1902 raise_exception(TT_ILL_INSN
);
1904 case 0x04: // Nucleus
1905 case 0x0c: // Nucleus Little Endian (LE)
1906 case 0x11: // As if user secondary
1907 case 0x19: // As if user secondary LE
1908 case 0x4a: // UPA config
1909 case 0x81: // Secondary
1910 case 0x89: // Secondary LE
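            /* Writing the LSU control register: only the D-MMU/I-MMU enable
               bits are kept, and toggling them presumably invalidates
               previously generated mappings, hence the TLB flush that
               follows. */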
            env->lsu = val & (DMMU_E | IMMU_E);
            // Mappings generated during D/I MMU disabled mode are
            // invalid in normal mode
            if (oldreg != env->lsu) {
                DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
                            oldreg, env->lsu);
                tlb_flush(env, 1);
            }
            break;
1931 case 0x50: // I-MMU regs
1933 int reg
= (addr
>> 3) & 0xf;
1936 oldreg
= env
->immuregs
[reg
];
1941 case 1: // Not in I-MMU
1948 val
= 0; // Clear SFSR
1950 case 5: // TSB access
1951 case 6: // Tag access
1955 env
->immuregs
[reg
] = val
;
1956 if (oldreg
!= env
->immuregs
[reg
]) {
1957 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64
" -> 0x%08"
1958 PRIx64
"\n", reg
, oldreg
, env
->immuregs
[reg
]);
1965 case 0x54: // I-MMU data in
1969 // Try finding an invalid entry
1970 for (i
= 0; i
< 64; i
++) {
1971 if ((env
->itlb_tte
[i
] & 0x8000000000000000ULL
) == 0) {
1972 env
->itlb_tag
[i
] = env
->immuregs
[6];
1973 env
->itlb_tte
[i
] = val
;
1977 // Try finding an unlocked entry
1978 for (i
= 0; i
< 64; i
++) {
1979 if ((env
->itlb_tte
[i
] & 0x40) == 0) {
1980 env
->itlb_tag
[i
] = env
->immuregs
[6];
1981 env
->itlb_tte
[i
] = val
;
1988 case 0x55: // I-MMU data access
1992 unsigned int i
= (addr
>> 3) & 0x3f;
1994 env
->itlb_tag
[i
] = env
->immuregs
[6];
1995 env
->itlb_tte
[i
] = val
;
1998 case 0x57: // I-MMU demap
2002 for (i
= 0; i
< 64; i
++) {
2003 if ((env
->itlb_tte
[i
] & 0x8000000000000000ULL
) != 0) {
2004 target_ulong mask
= 0xffffffffffffe000ULL
;
2006 mask
<<= 3 * ((env
->itlb_tte
[i
] >> 61) & 3);
2007 if ((val
& mask
) == (env
->itlb_tag
[i
] & mask
)) {
2008 env
->itlb_tag
[i
] = 0;
2009 env
->itlb_tte
[i
] = 0;
2016 case 0x58: // D-MMU regs
2018 int reg
= (addr
>> 3) & 0xf;
2021 oldreg
= env
->dmmuregs
[reg
];
2027 if ((val
& 1) == 0) {
2028 val
= 0; // Clear SFSR, Fault address
2029 env
->dmmuregs
[4] = 0;
2031 env
->dmmuregs
[reg
] = val
;
2033 case 1: // Primary context
2034 case 2: // Secondary context
2035 case 5: // TSB access
2036 case 6: // Tag access
2037 case 7: // Virtual Watchpoint
2038 case 8: // Physical Watchpoint
2042 env
->dmmuregs
[reg
] = val
;
2043 if (oldreg
!= env
->dmmuregs
[reg
]) {
2044 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64
" -> 0x%08"
2045 PRIx64
"\n", reg
, oldreg
, env
->dmmuregs
[reg
]);
2052 case 0x5c: // D-MMU data in
2056 // Try finding an invalid entry
2057 for (i
= 0; i
< 64; i
++) {
2058 if ((env
->dtlb_tte
[i
] & 0x8000000000000000ULL
) == 0) {
2059 env
->dtlb_tag
[i
] = env
->dmmuregs
[6];
2060 env
->dtlb_tte
[i
] = val
;
2064 // Try finding an unlocked entry
2065 for (i
= 0; i
< 64; i
++) {
2066 if ((env
->dtlb_tte
[i
] & 0x40) == 0) {
2067 env
->dtlb_tag
[i
] = env
->dmmuregs
[6];
2068 env
->dtlb_tte
[i
] = val
;
2075 case 0x5d: // D-MMU data access
2077 unsigned int i
= (addr
>> 3) & 0x3f;
2079 env
->dtlb_tag
[i
] = env
->dmmuregs
[6];
2080 env
->dtlb_tte
[i
] = val
;
2083 case 0x5f: // D-MMU demap
2087 for (i
= 0; i
< 64; i
++) {
2088 if ((env
->dtlb_tte
[i
] & 0x8000000000000000ULL
) != 0) {
2089 target_ulong mask
= 0xffffffffffffe000ULL
;
2091 mask
<<= 3 * ((env
->dtlb_tte
[i
] >> 61) & 3);
2092 if ((val
& mask
) == (env
->dtlb_tag
[i
] & mask
)) {
2093 env
->dtlb_tag
[i
] = 0;
2094 env
->dtlb_tte
[i
] = 0;
2101 case 0x49: // Interrupt data receive
2104 case 0x46: // D-cache data
2105 case 0x47: // D-cache tag access
2106 case 0x4b: // E-cache error enable
2107 case 0x4c: // E-cache asynchronous fault status
2108 case 0x4d: // E-cache asynchronous fault address
2109 case 0x4e: // E-cache tag data
2110 case 0x66: // I-cache instruction access
2111 case 0x67: // I-cache tag access
2112 case 0x6e: // I-cache predecode
2113 case 0x6f: // I-cache LRU etc.
2114 case 0x76: // E-cache tag
2115 case 0x7e: // E-cache tag
2117 case 0x51: // I-MMU 8k TSB pointer, RO
2118 case 0x52: // I-MMU 64k TSB pointer, RO
2119 case 0x56: // I-MMU tag read, RO
2120 case 0x59: // D-MMU 8k TSB pointer, RO
2121 case 0x5a: // D-MMU 64k TSB pointer, RO
2122 case 0x5b: // D-MMU data pointer, RO
2123 case 0x5e: // D-MMU tag read, RO
2124 case 0x48: // Interrupt dispatch, RO
2125 case 0x7f: // Incoming interrupt vector, RO
2126 case 0x82: // Primary no-fault, RO
2127 case 0x83: // Secondary no-fault, RO
2128 case 0x8a: // Primary no-fault LE, RO
2129 case 0x8b: // Secondary no-fault LE, RO
2131 do_unassigned_access(addr
, 1, 0, 1, size
);
2135 #endif /* CONFIG_USER_ONLY */
2137 void helper_ldda_asi(target_ulong addr
, int asi
, int rd
)
2139 if ((asi
< 0x80 && (env
->pstate
& PS_PRIV
) == 0)
2140 || ((env
->def
->features
& CPU_FEATURE_HYPV
)
2141 && asi
>= 0x30 && asi
< 0x80
2142 && !(env
->hpstate
& HS_PRIV
)))
2143 raise_exception(TT_PRIV_ACT
);
2146 case 0x24: // Nucleus quad LDD 128 bit atomic
2147 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
2148 helper_check_align(addr
, 0xf);
2150 env
->gregs
[1] = ldq_kernel(addr
+ 8);
2152 bswap64s(&env
->gregs
[1]);
2153 } else if (rd
< 8) {
2154 env
->gregs
[rd
] = ldq_kernel(addr
);
2155 env
->gregs
[rd
+ 1] = ldq_kernel(addr
+ 8);
2157 bswap64s(&env
->gregs
[rd
]);
2158 bswap64s(&env
->gregs
[rd
+ 1]);
2161 env
->regwptr
[rd
] = ldq_kernel(addr
);
2162 env
->regwptr
[rd
+ 1] = ldq_kernel(addr
+ 8);
2164 bswap64s(&env
->regwptr
[rd
]);
2165 bswap64s(&env
->regwptr
[rd
+ 1]);
2170 helper_check_align(addr
, 0x3);
2172 env
->gregs
[1] = helper_ld_asi(addr
+ 4, asi
, 4, 0);
2174 env
->gregs
[rd
] = helper_ld_asi(addr
, asi
, 4, 0);
2175 env
->gregs
[rd
+ 1] = helper_ld_asi(addr
+ 4, asi
, 4, 0);
2177 env
->regwptr
[rd
] = helper_ld_asi(addr
, asi
, 4, 0);
2178 env
->regwptr
[rd
+ 1] = helper_ld_asi(addr
+ 4, asi
, 4, 0);
2184 void helper_ldf_asi(target_ulong addr
, int asi
, int size
, int rd
)
2189 helper_check_align(addr
, 3);
2191 case 0xf0: // Block load primary
2192 case 0xf1: // Block load secondary
2193 case 0xf8: // Block load primary LE
2194 case 0xf9: // Block load secondary LE
2196 raise_exception(TT_ILL_INSN
);
2199 helper_check_align(addr
, 0x3f);
2200 for (i
= 0; i
< 16; i
++) {
2201 *(uint32_t *)&env
->fpr
[rd
++] = helper_ld_asi(addr
, asi
& 0x8f, 4,
2211 val
= helper_ld_asi(addr
, asi
, size
, 0);
2215 *((uint32_t *)&env
->fpr
[rd
]) = val
;
2218 *((int64_t *)&DT0
) = val
;
2226 void helper_stf_asi(target_ulong addr
, int asi
, int size
, int rd
)
2229 target_ulong val
= 0;
2231 helper_check_align(addr
, 3);
2233 case 0xe0: // UA2007 Block commit store primary (cache flush)
2234 case 0xe1: // UA2007 Block commit store secondary (cache flush)
2235 case 0xf0: // Block store primary
2236 case 0xf1: // Block store secondary
2237 case 0xf8: // Block store primary LE
2238 case 0xf9: // Block store secondary LE
2240 raise_exception(TT_ILL_INSN
);
2243 helper_check_align(addr
, 0x3f);
2244 for (i
= 0; i
< 16; i
++) {
2245 val
= *(uint32_t *)&env
->fpr
[rd
++];
2246 helper_st_asi(addr
, val
, asi
& 0x8f, 4);
2258 val
= *((uint32_t *)&env
->fpr
[rd
]);
2261 val
= *((int64_t *)&DT0
);
2267 helper_st_asi(addr
, val
, asi
, size
);
target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
                            target_ulong val2, uint32_t asi)
{
    target_ulong ret;

    val2 &= 0xffffffffUL;
    ret = helper_ld_asi(addr, asi, 4, 0);
    ret &= 0xffffffffUL;
    if (val2 == ret)
        helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4);
    return ret;
}

target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
                             target_ulong val2, uint32_t asi)
{
    target_ulong ret;

    ret = helper_ld_asi(addr, asi, 8, 0);
    if (val2 == ret)
        helper_st_asi(addr, val1, asi, 8);
    return ret;
}
#endif /* TARGET_SPARC64 */
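/* The helpers below are 32-bit SPARC only: rett manipulates the PSR/WIM
   register-window state, and udiv/sdiv divide the 64-bit dividend formed
   from %y and the low 32 bits of rs1, as the V8 divide instructions do.
   For instance, with %y = 1 and a low word of 0 the dividend is
   0x100000000; dividing by 2 yields 0x80000000, which udiv can return
   unchanged while sdiv detects the signed overflow and clamps the result
   to 0x7fffffff. */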
#ifndef TARGET_SPARC64
void helper_rett(void)
{
    unsigned int cwp;

    if (env->psret == 1)
        raise_exception(TT_ILL_INSN);

    env->psret = 1;
    cwp = cpu_cwp_inc(env, env->cwp + 1) ;
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_UNF);
    }
    set_cwp(cwp);
    env->psrs = env->psrps;
}

target_ulong helper_udiv(target_ulong a, target_ulong b)
{
    uint64_t x0;
    uint32_t x1;

    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    x1 = b;

    if (x1 == 0) {
        raise_exception(TT_DIV_ZERO);
    }

    x0 = x0 / x1;
    if (x0 > 0xffffffff) {
        return 0xffffffff;
    } else {
        return x0;
    }
}

target_ulong helper_sdiv(target_ulong a, target_ulong b)
{
    int64_t x0;
    int32_t x1;

    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    x1 = b;

    if (x1 == 0) {
        raise_exception(TT_DIV_ZERO);
    }

    x0 = x0 / x1;
    if ((int32_t) x0 != x0) {
        return x0 < 0? 0x80000000: 0x7fffffff;
    } else {
        return x0;
    }
}
2357 void helper_stdf(target_ulong addr
, int mem_idx
)
2359 helper_check_align(addr
, 7);
2360 #if !defined(CONFIG_USER_ONLY)
2363 stfq_user(addr
, DT0
);
2366 stfq_kernel(addr
, DT0
);
2368 #ifdef TARGET_SPARC64
2370 stfq_hypv(addr
, DT0
);
2377 address_mask(env
, &addr
);
2378 stfq_raw(addr
, DT0
);
2382 void helper_lddf(target_ulong addr
, int mem_idx
)
2384 helper_check_align(addr
, 7);
2385 #if !defined(CONFIG_USER_ONLY)
2388 DT0
= ldfq_user(addr
);
2391 DT0
= ldfq_kernel(addr
);
2393 #ifdef TARGET_SPARC64
2395 DT0
= ldfq_hypv(addr
);
2402 address_mask(env
, &addr
);
2403 DT0
= ldfq_raw(addr
);
2407 void helper_ldqf(target_ulong addr
, int mem_idx
)
2409 // XXX add 128 bit load
2412 helper_check_align(addr
, 7);
2413 #if !defined(CONFIG_USER_ONLY)
2416 u
.ll
.upper
= ldq_user(addr
);
2417 u
.ll
.lower
= ldq_user(addr
+ 8);
2421 u
.ll
.upper
= ldq_kernel(addr
);
2422 u
.ll
.lower
= ldq_kernel(addr
+ 8);
2425 #ifdef TARGET_SPARC64
2427 u
.ll
.upper
= ldq_hypv(addr
);
2428 u
.ll
.lower
= ldq_hypv(addr
+ 8);
2436 address_mask(env
, &addr
);
2437 u
.ll
.upper
= ldq_raw(addr
);
2438 u
.ll
.lower
= ldq_raw((addr
+ 8) & 0xffffffffULL
);
2443 void helper_stqf(target_ulong addr
, int mem_idx
)
2445 // XXX add 128 bit store
2448 helper_check_align(addr
, 7);
2449 #if !defined(CONFIG_USER_ONLY)
2453 stq_user(addr
, u
.ll
.upper
);
2454 stq_user(addr
+ 8, u
.ll
.lower
);
2458 stq_kernel(addr
, u
.ll
.upper
);
2459 stq_kernel(addr
+ 8, u
.ll
.lower
);
2461 #ifdef TARGET_SPARC64
2464 stq_hypv(addr
, u
.ll
.upper
);
2465 stq_hypv(addr
+ 8, u
.ll
.lower
);
2473 address_mask(env
, &addr
);
2474 stq_raw(addr
, u
.ll
.upper
);
2475 stq_raw((addr
+ 8) & 0xffffffffULL
, u
.ll
.lower
);
static inline void set_fsr(void)
{
    int rnd_mode;

    switch (env->fsr & FSR_RD_MASK) {
    case FSR_RD_NEAREST:
        rnd_mode = float_round_nearest_even;
        break;
    case FSR_RD_ZERO:
        rnd_mode = float_round_to_zero;
        break;
    case FSR_RD_POS:
        rnd_mode = float_round_up;
        break;
    case FSR_RD_NEG:
        rnd_mode = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_mode, &env->fp_status);
}

void helper_ldfsr(uint32_t new_fsr)
{
    env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK);
    set_fsr();
}

#ifdef TARGET_SPARC64
void helper_ldxfsr(uint64_t new_fsr)
{
    env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK);
    set_fsr();
}
#endif

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}
#ifndef TARGET_SPARC64
/* XXX: use another pointer for %iN registers to avoid slow wrapping
   handling ? */
void helper_save(void)
{
    uint32_t cwp;

    cwp = cpu_cwp_dec(env, env->cwp - 1);
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_OVF);
    }
    set_cwp(cwp);
}

void helper_restore(void)
{
    uint32_t cwp;

    cwp = cpu_cwp_inc(env, env->cwp + 1);
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_UNF);
    }
    set_cwp(cwp);
}

void helper_wrpsr(target_ulong new_psr)
{
    if ((new_psr & PSR_CWP) >= env->nwindows)
        raise_exception(TT_ILL_INSN);
    else
        PUT_PSR(env, new_psr);
}

target_ulong helper_rdpsr(void)
{
    return GET_PSR(env);
}
2560 /* XXX: use another pointer for %iN registers to avoid slow wrapping
2562 void helper_save(void)
2566 cwp
= cpu_cwp_dec(env
, env
->cwp
- 1);
2567 if (env
->cansave
== 0) {
2568 raise_exception(TT_SPILL
| (env
->otherwin
!= 0 ?
2569 (TT_WOTHER
| ((env
->wstate
& 0x38) >> 1)):
2570 ((env
->wstate
& 0x7) << 2)));
2572 if (env
->cleanwin
- env
->canrestore
== 0) {
2573 // XXX Clean windows without trap
2574 raise_exception(TT_CLRWIN
);
2583 void helper_restore(void)
2587 cwp
= cpu_cwp_inc(env
, env
->cwp
+ 1);
2588 if (env
->canrestore
== 0) {
2589 raise_exception(TT_FILL
| (env
->otherwin
!= 0 ?
2590 (TT_WOTHER
| ((env
->wstate
& 0x38) >> 1)):
2591 ((env
->wstate
& 0x7) << 2)));
2599 void helper_flushw(void)
2601 if (env
->cansave
!= env
->nwindows
- 2) {
2602 raise_exception(TT_SPILL
| (env
->otherwin
!= 0 ?
2603 (TT_WOTHER
| ((env
->wstate
& 0x38) >> 1)):
2604 ((env
->wstate
& 0x7) << 2)));
2608 void helper_saved(void)
2611 if (env
->otherwin
== 0)
2617 void helper_restored(void)
2620 if (env
->cleanwin
< env
->nwindows
- 1)
2622 if (env
->otherwin
== 0)
target_ulong helper_rdccr(void)
{
    return GET_CCR(env);
}

void helper_wrccr(target_ulong new_ccr)
{
    PUT_CCR(env, new_ccr);
}

// CWP handling is reversed in V9, but we still use the V8 register
// window mechanism internally
target_ulong helper_rdcwp(void)
{
    return GET_CWP64(env);
}

void helper_wrcwp(target_ulong new_cwp)
{
    PUT_CWP64(env, new_cwp);
}

// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO) \
    ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO) \
    GET_FIELD(X, 63 - (TO), 63 - (FROM))

target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
{
    return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
        (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
        (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
        (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
        (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
        (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
        (((pixel_addr >> 55) & 1) << 4) |
        (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
        GET_FIELD_SP(pixel_addr, 11, 12);
}

target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
{
    uint64_t tmp;

    tmp = addr + offset;
    env->gsr &= ~7ULL;
    env->gsr |= tmp & 7ULL;
    return tmp & ~7ULL;
}

target_ulong helper_popc(target_ulong val)
{
    return ctpop64(val);
}
2686 static inline uint64_t *get_gregset(uint64_t pstate
)
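/* V9 keeps several banks of global registers (normal, alternate, MMU and
   interrupt globals); the PSTATE bits masked by 0xc01 below select which
   bank is active, and get_gregset presumably returns the save area for a
   given selection. */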
static inline void change_pstate(uint64_t new_pstate)
{
    uint64_t pstate_regs, new_pstate_regs;
    uint64_t *src, *dst;

    pstate_regs = env->pstate & 0xc01;
    new_pstate_regs = new_pstate & 0xc01;
    if (new_pstate_regs != pstate_regs) {
        // Switch global register bank
        src = get_gregset(new_pstate_regs);
        dst = get_gregset(pstate_regs);
        memcpy32(dst, env->gregs);
        memcpy32(env->gregs, src);
    }
    env->pstate = new_pstate;
}

void helper_wrpstate(target_ulong new_state)
{
    if (!(env->def->features & CPU_FEATURE_GL))
        change_pstate(new_state & 0xf3f);
}
2726 env
->pc
= env
->tsptr
->tpc
;
2727 env
->npc
= env
->tsptr
->tnpc
+ 4;
2728 PUT_CCR(env
, env
->tsptr
->tstate
>> 32);
2729 env
->asi
= (env
->tsptr
->tstate
>> 24) & 0xff;
2730 change_pstate((env
->tsptr
->tstate
>> 8) & 0xf3f);
2731 PUT_CWP64(env
, env
->tsptr
->tstate
& 0xff);
2733 env
->tsptr
= &env
->ts
[env
->tl
& MAXTL_MASK
];
2736 void helper_retry(void)
2738 env
->pc
= env
->tsptr
->tpc
;
2739 env
->npc
= env
->tsptr
->tnpc
;
2740 PUT_CCR(env
, env
->tsptr
->tstate
>> 32);
2741 env
->asi
= (env
->tsptr
->tstate
>> 24) & 0xff;
2742 change_pstate((env
->tsptr
->tstate
>> 8) & 0xf3f);
2743 PUT_CWP64(env
, env
->tsptr
->tstate
& 0xff);
2745 env
->tsptr
= &env
->ts
[env
->tl
& MAXTL_MASK
];
2748 void helper_set_softint(uint64_t value
)
2750 env
->softint
|= (uint32_t)value
;
2753 void helper_clear_softint(uint64_t value
)
2755 env
->softint
&= (uint32_t)~value
;
2758 void helper_write_softint(uint64_t value
)
2760 env
->softint
= (uint32_t)value
;
2764 void helper_flush(target_ulong addr
)
2767 tb_invalidate_page_range(addr
, addr
+ 8);
2770 #ifdef TARGET_SPARC64
2772 static const char * const excp_names
[0x80] = {
2773 [TT_TFAULT
] = "Instruction Access Fault",
2774 [TT_TMISS
] = "Instruction Access MMU Miss",
2775 [TT_CODE_ACCESS
] = "Instruction Access Error",
2776 [TT_ILL_INSN
] = "Illegal Instruction",
2777 [TT_PRIV_INSN
] = "Privileged Instruction",
2778 [TT_NFPU_INSN
] = "FPU Disabled",
2779 [TT_FP_EXCP
] = "FPU Exception",
2780 [TT_TOVF
] = "Tag Overflow",
2781 [TT_CLRWIN
] = "Clean Windows",
2782 [TT_DIV_ZERO
] = "Division By Zero",
2783 [TT_DFAULT
] = "Data Access Fault",
2784 [TT_DMISS
] = "Data Access MMU Miss",
2785 [TT_DATA_ACCESS
] = "Data Access Error",
2786 [TT_DPROT
] = "Data Protection Error",
2787 [TT_UNALIGNED
] = "Unaligned Memory Access",
2788 [TT_PRIV_ACT
] = "Privileged Action",
2789 [TT_EXTINT
| 0x1] = "External Interrupt 1",
2790 [TT_EXTINT
| 0x2] = "External Interrupt 2",
2791 [TT_EXTINT
| 0x3] = "External Interrupt 3",
2792 [TT_EXTINT
| 0x4] = "External Interrupt 4",
2793 [TT_EXTINT
| 0x5] = "External Interrupt 5",
2794 [TT_EXTINT
| 0x6] = "External Interrupt 6",
2795 [TT_EXTINT
| 0x7] = "External Interrupt 7",
2796 [TT_EXTINT
| 0x8] = "External Interrupt 8",
2797 [TT_EXTINT
| 0x9] = "External Interrupt 9",
2798 [TT_EXTINT
| 0xa] = "External Interrupt 10",
2799 [TT_EXTINT
| 0xb] = "External Interrupt 11",
2800 [TT_EXTINT
| 0xc] = "External Interrupt 12",
2801 [TT_EXTINT
| 0xd] = "External Interrupt 13",
2802 [TT_EXTINT
| 0xe] = "External Interrupt 14",
2803 [TT_EXTINT
| 0xf] = "External Interrupt 15",
void do_interrupt(CPUState *env)
{
    int intno = env->exception_index;
2812 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
2816 if (intno
< 0 || intno
>= 0x180)
2818 else if (intno
>= 0x100)
2819 name
= "Trap Instruction";
2820 else if (intno
>= 0xc0)
2821 name
= "Window Fill";
2822 else if (intno
>= 0x80)
2823 name
= "Window Spill";
2825 name
= excp_names
[intno
];
2830 qemu_log("%6d: %s (v=%04x) pc=%016" PRIx64
" npc=%016" PRIx64
2831 " SP=%016" PRIx64
"\n",
2834 env
->npc
, env
->regwptr
[6]);
2835 log_cpu_state(env
, 0);
2842 ptr
= (uint8_t *)env
->pc
;
2843 for(i
= 0; i
< 16; i
++) {
2844 qemu_log(" %02x", ldub(ptr
+ i
));
#if !defined(CONFIG_USER_ONLY)
    if (env->tl >= env->maxtl) {
        cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
                  " Error state", env->exception_index, env->tl, env->maxtl);
        return;
    }
#endif
    if (env->tl < env->maxtl - 1) {
        env->tl++;
    } else {
        env->pstate |= PS_RED;
        if (env->tl < env->maxtl)
            env->tl++;
    }
    env->tsptr = &env->ts[env->tl & MAXTL_MASK];
    env->tsptr->tstate = ((uint64_t)GET_CCR(env) << 32) |
        ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
        GET_CWP64(env);
    env->tsptr->tpc = env->pc;
    env->tsptr->tnpc = env->npc;
    env->tsptr->tt = intno;
    if (!(env->def->features & CPU_FEATURE_GL)) {
        switch (intno) {
        case TT_IVEC:
            change_pstate(PS_PEF | PS_PRIV | PS_IG);
            break;
        case TT_TFAULT:
        case TT_TMISS:
        case TT_DFAULT:
        case TT_DMISS:
        case TT_DPROT:
            change_pstate(PS_PEF | PS_PRIV | PS_MG);
            break;
        default:
            change_pstate(PS_PEF | PS_PRIV | PS_AG);
            break;
        }
    }
    if (intno == TT_CLRWIN)
        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1));
    else if ((intno & 0x1c0) == TT_SPILL)
        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2));
    else if ((intno & 0x1c0) == TT_FILL)
        cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1));
    env->tbr &= ~0x7fffULL;
    env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = 0;
}
#else
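/* Note on the vectoring above: each trap-table entry is 32 bytes, so the
   trap type is shifted left by 5, and bit 14 selects the half of the table
   used when a trap is taken above trap level 0. */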
2904 static const char * const excp_names
[0x80] = {
2905 [TT_TFAULT
] = "Instruction Access Fault",
2906 [TT_ILL_INSN
] = "Illegal Instruction",
2907 [TT_PRIV_INSN
] = "Privileged Instruction",
2908 [TT_NFPU_INSN
] = "FPU Disabled",
2909 [TT_WIN_OVF
] = "Window Overflow",
2910 [TT_WIN_UNF
] = "Window Underflow",
2911 [TT_UNALIGNED
] = "Unaligned Memory Access",
2912 [TT_FP_EXCP
] = "FPU Exception",
2913 [TT_DFAULT
] = "Data Access Fault",
2914 [TT_TOVF
] = "Tag Overflow",
2915 [TT_EXTINT
| 0x1] = "External Interrupt 1",
2916 [TT_EXTINT
| 0x2] = "External Interrupt 2",
2917 [TT_EXTINT
| 0x3] = "External Interrupt 3",
2918 [TT_EXTINT
| 0x4] = "External Interrupt 4",
2919 [TT_EXTINT
| 0x5] = "External Interrupt 5",
2920 [TT_EXTINT
| 0x6] = "External Interrupt 6",
2921 [TT_EXTINT
| 0x7] = "External Interrupt 7",
2922 [TT_EXTINT
| 0x8] = "External Interrupt 8",
2923 [TT_EXTINT
| 0x9] = "External Interrupt 9",
2924 [TT_EXTINT
| 0xa] = "External Interrupt 10",
2925 [TT_EXTINT
| 0xb] = "External Interrupt 11",
2926 [TT_EXTINT
| 0xc] = "External Interrupt 12",
2927 [TT_EXTINT
| 0xd] = "External Interrupt 13",
2928 [TT_EXTINT
| 0xe] = "External Interrupt 14",
2929 [TT_EXTINT
| 0xf] = "External Interrupt 15",
2930 [TT_TOVF
] = "Tag Overflow",
2931 [TT_CODE_ACCESS
] = "Instruction Access Error",
2932 [TT_DATA_ACCESS
] = "Data Access Error",
2933 [TT_DIV_ZERO
] = "Division By Zero",
2934 [TT_NCP_INSN
] = "Coprocessor Disabled",
void do_interrupt(CPUState *env)
{
    int cwp, intno = env->exception_index;
2943 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
2947 if (intno
< 0 || intno
>= 0x100)
2949 else if (intno
>= 0x80)
2950 name
= "Trap Instruction";
2952 name
= excp_names
[intno
];
2957 qemu_log("%6d: %s (v=%02x) pc=%08x npc=%08x SP=%08x\n",
2960 env
->npc
, env
->regwptr
[6]);
2961 log_cpu_state(env
, 0);
2968 ptr
= (uint8_t *)env
->pc
;
2969 for(i
= 0; i
< 16; i
++) {
2970 qemu_log(" %02x", ldub(ptr
+ i
));
2978 #if !defined(CONFIG_USER_ONLY)
2979 if (env
->psret
== 0) {
2980 cpu_abort(env
, "Trap 0x%02x while interrupts disabled, Error state",
2981 env
->exception_index
);
2986 cwp
= cpu_cwp_dec(env
, env
->cwp
- 1);
2987 cpu_set_cwp(env
, cwp
);
2988 env
->regwptr
[9] = env
->pc
;
2989 env
->regwptr
[10] = env
->npc
;
2990 env
->psrps
= env
->psrs
;
2992 env
->tbr
= (env
->tbr
& TBR_BASE_MASK
) | (intno
<< 4);
2994 env
->npc
= env
->pc
+ 4;
2995 env
->exception_index
= 0;
2999 #if !defined(CONFIG_USER_ONLY)
3001 static void do_unaligned_access(target_ulong addr
, int is_write
, int is_user
,
3004 #define MMUSUFFIX _mmu
3005 #define ALIGNED_ONLY
3008 #include "softmmu_template.h"
3011 #include "softmmu_template.h"
3014 #include "softmmu_template.h"
3017 #include "softmmu_template.h"
3019 /* XXX: make it generic ? */
3020 static void cpu_restore_state2(void *retaddr
)
3022 TranslationBlock
*tb
;
3026 /* now we have a real cpu fault */
3027 pc
= (unsigned long)retaddr
;
3028 tb
= tb_find_pc(pc
);
3030 /* the PC is inside the translated code. It means that we have
3031 a virtual CPU fault */
3032 cpu_restore_state(tb
, env
, pc
, (void *)(long)env
->cond
);
3037 static void do_unaligned_access(target_ulong addr
, int is_write
, int is_user
,
3040 #ifdef DEBUG_UNALIGNED
3041 printf("Unaligned access to 0x" TARGET_FMT_lx
" from 0x" TARGET_FMT_lx
3042 "\n", addr
, env
->pc
);
3044 cpu_restore_state2(retaddr
);
3045 raise_exception(TT_UNALIGNED
);
3048 /* try to fill the TLB and return an exception if error. If retaddr is
3049 NULL, it means that the function was called in C code (i.e. not
3050 from generated code or from helper.c) */
3051 /* XXX: fix it to restore all registers */
3052 void tlb_fill(target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
3055 CPUState
*saved_env
;
3057 /* XXX: hack to restore env in all cases, even if not called from
3060 env
= cpu_single_env
;
3062 ret
= cpu_sparc_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
3064 cpu_restore_state2(retaddr
);
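/* tlb_fill above is reached from the softmmu slow path: retaddr points into
   the generated code that faulted, which is why cpu_restore_state2 is used
   to recover the guest PC before the MMU fault is delivered. */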
#ifndef TARGET_SPARC64
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
{
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
#ifdef DEBUG_UNASSIGNED
    if (is_asi)
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " asi 0x%02x from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, is_asi, env->pc);
    else
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, env->pc);
#endif
    if (env->mmuregs[3]) /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    if (is_asi)
        env->mmuregs[3] |= 1 << 16;
    if (env->psrs)
        env->mmuregs[3] |= 1 << 5;
    if (is_exec)
        env->mmuregs[3] |= 1 << 6;
    if (is_write)
        env->mmuregs[3] |= 1 << 7;
    env->mmuregs[3] |= (5 << 2) | 2;
    env->mmuregs[4] = addr; /* Fault address register */
    if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
        if (is_exec)
            raise_exception(TT_CODE_ACCESS);
        else
            raise_exception(TT_DATA_ACCESS);
    }
    env = saved_env;
}
#else
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
{
#ifdef DEBUG_UNASSIGNED
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
           "\n", addr, env->pc);
    env = saved_env;
#endif
    if (is_exec)
        raise_exception(TT_CODE_ACCESS);
    else
        raise_exception(TT_DATA_ACCESS);
}
#endif
#ifdef TARGET_SPARC64
void helper_tick_set_count(void *opaque, uint64_t count)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_count(opaque, count);
#endif
}

uint64_t helper_tick_get_count(void *opaque)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_tick_get_count(opaque);
#else
    return 0;
#endif
}

void helper_tick_set_limit(void *opaque, uint64_t limit)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_limit(opaque, limit);
#endif
}
#endif