Implement no-fault loads
[qemu/mini2440.git] / target-sparc / op_helper.c
blob 75020a92f22125fa5856b53ad3979a8ca9209527
1 #include "exec.h"
2 #include "host-utils.h"
3 #include "helper.h"
4 #if !defined(CONFIG_USER_ONLY)
5 #include "softmmu_exec.h"
6 #endif /* !defined(CONFIG_USER_ONLY) */
8 //#define DEBUG_MMU
9 //#define DEBUG_MXCC
10 //#define DEBUG_UNALIGNED
11 //#define DEBUG_UNASSIGNED
12 //#define DEBUG_ASI
14 #ifdef DEBUG_MMU
15 #define DPRINTF_MMU(fmt, args...) \
16 do { printf("MMU: " fmt , ##args); } while (0)
17 #else
18 #define DPRINTF_MMU(fmt, args...) do {} while (0)
19 #endif
21 #ifdef DEBUG_MXCC
22 #define DPRINTF_MXCC(fmt, args...) \
23 do { printf("MXCC: " fmt , ##args); } while (0)
24 #else
25 #define DPRINTF_MXCC(fmt, args...) do {} while (0)
26 #endif
28 #ifdef DEBUG_ASI
29 #define DPRINTF_ASI(fmt, args...) \
30 do { printf("ASI: " fmt , ##args); } while (0)
31 #else
32 #define DPRINTF_ASI(fmt, args...) do {} while (0)
33 #endif
35 #ifdef TARGET_SPARC64
36 #ifndef TARGET_ABI32
37 #define AM_CHECK(env1) ((env1)->pstate & PS_AM)
38 #else
39 #define AM_CHECK(env1) (1)
40 #endif
41 #endif
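/* PSTATE.AM (address masking) selects 32-bit addressing on sparc64: when it
   is set, the upper 32 bits of every effective address are cleared.  32-bit
   user-mode builds (TARGET_ABI32) apply the mask unconditionally. */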
43 static inline void address_mask(CPUState *env1, target_ulong *addr)
45 #ifdef TARGET_SPARC64
46 if (AM_CHECK(env1))
47 *addr &= 0xffffffffULL;
48 #endif
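/* raise_exception() does not return: cpu_loop_exit() longjmps back to the
   main execution loop with env->exception_index holding the trap type. */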
51 void raise_exception(int tt)
53 env->exception_index = tt;
54 cpu_loop_exit();
57 void helper_trap(target_ulong nb_trap)
59 env->exception_index = TT_TRAP + (nb_trap & 0x7f);
60 cpu_loop_exit();
63 void helper_trapcc(target_ulong nb_trap, target_ulong do_trap)
65 if (do_trap) {
66 env->exception_index = TT_TRAP + (nb_trap & 0x7f);
67 cpu_loop_exit();
71 static inline void set_cwp(int new_cwp)
73 cpu_set_cwp(env, new_cwp);
76 void helper_check_align(target_ulong addr, uint32_t align)
78 if (addr & align) {
79 #ifdef DEBUG_UNALIGNED
80 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
81 "\n", addr, env->pc);
82 #endif
83 raise_exception(TT_UNALIGNED);
87 #define F_HELPER(name, p) void helper_f##name##p(void)
89 #define F_BINOP(name) \
90 F_HELPER(name, s) \
91 { \
92 FT0 = float32_ ## name (FT0, FT1, &env->fp_status); \
93 } \
94 F_HELPER(name, d) \
95 { \
96 DT0 = float64_ ## name (DT0, DT1, &env->fp_status); \
97 } \
98 F_HELPER(name, q) \
99 { \
100 QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \
103 F_BINOP(add);
104 F_BINOP(sub);
105 F_BINOP(mul);
106 F_BINOP(div);
107 #undef F_BINOP
109 void helper_fsmuld(void)
111 DT0 = float64_mul(float32_to_float64(FT0, &env->fp_status),
112 float32_to_float64(FT1, &env->fp_status),
113 &env->fp_status);
116 void helper_fdmulq(void)
118 QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
119 float64_to_float128(DT1, &env->fp_status),
120 &env->fp_status);
123 F_HELPER(neg, s)
125 FT0 = float32_chs(FT1);
128 #ifdef TARGET_SPARC64
129 F_HELPER(neg, d)
131 DT0 = float64_chs(DT1);
134 F_HELPER(neg, q)
136 QT0 = float128_chs(QT1);
138 #endif
140 /* Integer to float conversion. */
141 F_HELPER(ito, s)
143 FT0 = int32_to_float32(*((int32_t *)&FT1), &env->fp_status);
146 F_HELPER(ito, d)
148 DT0 = int32_to_float64(*((int32_t *)&FT1), &env->fp_status);
151 F_HELPER(ito, q)
153 QT0 = int32_to_float128(*((int32_t *)&FT1), &env->fp_status);
156 #ifdef TARGET_SPARC64
157 F_HELPER(xto, s)
159 FT0 = int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
162 F_HELPER(xto, d)
164 DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
167 F_HELPER(xto, q)
169 QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
171 #endif
172 #undef F_HELPER
174 /* floating point conversion */
175 void helper_fdtos(void)
177 FT0 = float64_to_float32(DT1, &env->fp_status);
180 void helper_fstod(void)
182 DT0 = float32_to_float64(FT1, &env->fp_status);
185 void helper_fqtos(void)
187 FT0 = float128_to_float32(QT1, &env->fp_status);
190 void helper_fstoq(void)
192 QT0 = float32_to_float128(FT1, &env->fp_status);
195 void helper_fqtod(void)
197 DT0 = float128_to_float64(QT1, &env->fp_status);
200 void helper_fdtoq(void)
202 QT0 = float64_to_float128(DT1, &env->fp_status);
205 /* Float to integer conversion. */
206 void helper_fstoi(void)
208 *((int32_t *)&FT0) = float32_to_int32_round_to_zero(FT1, &env->fp_status);
211 void helper_fdtoi(void)
213 *((int32_t *)&FT0) = float64_to_int32_round_to_zero(DT1, &env->fp_status);
216 void helper_fqtoi(void)
218 *((int32_t *)&FT0) = float128_to_int32_round_to_zero(QT1, &env->fp_status);
221 #ifdef TARGET_SPARC64
222 void helper_fstox(void)
224 *((int64_t *)&DT0) = float32_to_int64_round_to_zero(FT1, &env->fp_status);
227 void helper_fdtox(void)
229 *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
232 void helper_fqtox(void)
234 *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
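/* FALIGNDATA treats the two 64-bit inputs as one 16-byte value and extracts
   8 bytes starting at the byte offset held in GSR.align (the low three bits
   of GSR, set up by alignaddr).  The special case below avoids an undefined
   64-bit shift by 64 when the offset is zero. */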
237 void helper_faligndata(void)
239 uint64_t tmp;
241 tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
242 /* on many architectures a shift of 64 does nothing */
243 if ((env->gsr & 7) != 0) {
244 tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
246 *((uint64_t *)&DT0) = tmp;
249 void helper_movl_FT0_0(void)
251 *((uint32_t *)&FT0) = 0;
254 void helper_movl_DT0_0(void)
256 *((uint64_t *)&DT0) = 0;
259 void helper_movl_FT0_1(void)
261 *((uint32_t *)&FT0) = 0xffffffff;
264 void helper_movl_DT0_1(void)
266 *((uint64_t *)&DT0) = 0xffffffffffffffffULL;
269 void helper_fnot(void)
271 *(uint64_t *)&DT0 = ~*(uint64_t *)&DT1;
274 void helper_fnots(void)
276 *(uint32_t *)&FT0 = ~*(uint32_t *)&FT1;
279 void helper_fnor(void)
281 *(uint64_t *)&DT0 = ~(*(uint64_t *)&DT0 | *(uint64_t *)&DT1);
284 void helper_fnors(void)
286 *(uint32_t *)&FT0 = ~(*(uint32_t *)&FT0 | *(uint32_t *)&FT1);
289 void helper_for(void)
291 *(uint64_t *)&DT0 |= *(uint64_t *)&DT1;
294 void helper_fors(void)
296 *(uint32_t *)&FT0 |= *(uint32_t *)&FT1;
299 void helper_fxor(void)
301 *(uint64_t *)&DT0 ^= *(uint64_t *)&DT1;
304 void helper_fxors(void)
306 *(uint32_t *)&FT0 ^= *(uint32_t *)&FT1;
309 void helper_fand(void)
311 *(uint64_t *)&DT0 &= *(uint64_t *)&DT1;
314 void helper_fands(void)
316 *(uint32_t *)&FT0 &= *(uint32_t *)&FT1;
319 void helper_fornot(void)
321 *(uint64_t *)&DT0 = *(uint64_t *)&DT0 | ~*(uint64_t *)&DT1;
324 void helper_fornots(void)
326 *(uint32_t *)&FT0 = *(uint32_t *)&FT0 | ~*(uint32_t *)&FT1;
329 void helper_fandnot(void)
331 *(uint64_t *)&DT0 = *(uint64_t *)&DT0 & ~*(uint64_t *)&DT1;
334 void helper_fandnots(void)
336 *(uint32_t *)&FT0 = *(uint32_t *)&FT0 & ~*(uint32_t *)&FT1;
339 void helper_fnand(void)
341 *(uint64_t *)&DT0 = ~(*(uint64_t *)&DT0 & *(uint64_t *)&DT1);
344 void helper_fnands(void)
346 *(uint32_t *)&FT0 = ~(*(uint32_t *)&FT0 & *(uint32_t *)&FT1);
349 void helper_fxnor(void)
351 *(uint64_t *)&DT0 ^= ~*(uint64_t *)&DT1;
354 void helper_fxnors(void)
356 *(uint32_t *)&FT0 ^= ~*(uint32_t *)&FT1;
359 #ifdef WORDS_BIGENDIAN
360 #define VIS_B64(n) b[7 - (n)]
361 #define VIS_W64(n) w[3 - (n)]
362 #define VIS_SW64(n) sw[3 - (n)]
363 #define VIS_L64(n) l[1 - (n)]
364 #define VIS_B32(n) b[3 - (n)]
365 #define VIS_W32(n) w[1 - (n)]
366 #else
367 #define VIS_B64(n) b[n]
368 #define VIS_W64(n) w[n]
369 #define VIS_SW64(n) sw[n]
370 #define VIS_L64(n) l[n]
371 #define VIS_B32(n) b[n]
372 #define VIS_W32(n) w[n]
373 #endif
375 typedef union {
376 uint8_t b[8];
377 uint16_t w[4];
378 int16_t sw[4];
379 uint32_t l[2];
380 float64 d;
381 } vis64;
383 typedef union {
384 uint8_t b[4];
385 uint16_t w[2];
386 uint32_t l;
387 float32 f;
388 } vis32;
390 void helper_fpmerge(void)
392 vis64 s, d;
394 s.d = DT0;
395 d.d = DT1;
397 // Reverse calculation order to handle overlap
398 d.VIS_B64(7) = s.VIS_B64(3);
399 d.VIS_B64(6) = d.VIS_B64(3);
400 d.VIS_B64(5) = s.VIS_B64(2);
401 d.VIS_B64(4) = d.VIS_B64(2);
402 d.VIS_B64(3) = s.VIS_B64(1);
403 d.VIS_B64(2) = d.VIS_B64(1);
404 d.VIS_B64(1) = s.VIS_B64(0);
405 //d.VIS_B64(0) = d.VIS_B64(0);
407 DT0 = d.d;
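/* The fmul8x16 family multiplies 8-bit pixel components by signed 16-bit
   coefficients.  PMUL keeps the upper 16 bits of each product and rounds the
   discarded low byte to nearest (the +0x100 adjustment when that byte is
   0x80 or more); the *al/*au variants apply a single 16-bit coefficient to
   all four components. */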
410 void helper_fmul8x16(void)
412 vis64 s, d;
413 uint32_t tmp;
415 s.d = DT0;
416 d.d = DT1;
418 #define PMUL(r) \
419 tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \
420 if ((tmp & 0xff) > 0x7f) \
421 tmp += 0x100; \
422 d.VIS_W64(r) = tmp >> 8;
424 PMUL(0);
425 PMUL(1);
426 PMUL(2);
427 PMUL(3);
428 #undef PMUL
430 DT0 = d.d;
433 void helper_fmul8x16al(void)
435 vis64 s, d;
436 uint32_t tmp;
438 s.d = DT0;
439 d.d = DT1;
441 #define PMUL(r) \
442 tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \
443 if ((tmp & 0xff) > 0x7f) \
444 tmp += 0x100; \
445 d.VIS_W64(r) = tmp >> 8;
447 PMUL(0);
448 PMUL(1);
449 PMUL(2);
450 PMUL(3);
451 #undef PMUL
453 DT0 = d.d;
456 void helper_fmul8x16au(void)
458 vis64 s, d;
459 uint32_t tmp;
461 s.d = DT0;
462 d.d = DT1;
464 #define PMUL(r) \
465 tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \
466 if ((tmp & 0xff) > 0x7f) \
467 tmp += 0x100; \
468 d.VIS_W64(r) = tmp >> 8;
470 PMUL(0);
471 PMUL(1);
472 PMUL(2);
473 PMUL(3);
474 #undef PMUL
476 DT0 = d.d;
479 void helper_fmul8sux16(void)
481 vis64 s, d;
482 uint32_t tmp;
484 s.d = DT0;
485 d.d = DT1;
487 #define PMUL(r) \
488 tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
489 if ((tmp & 0xff) > 0x7f) \
490 tmp += 0x100; \
491 d.VIS_W64(r) = tmp >> 8;
493 PMUL(0);
494 PMUL(1);
495 PMUL(2);
496 PMUL(3);
497 #undef PMUL
499 DT0 = d.d;
502 void helper_fmul8ulx16(void)
504 vis64 s, d;
505 uint32_t tmp;
507 s.d = DT0;
508 d.d = DT1;
510 #define PMUL(r) \
511 tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
512 if ((tmp & 0xff) > 0x7f) \
513 tmp += 0x100; \
514 d.VIS_W64(r) = tmp >> 8;
516 PMUL(0);
517 PMUL(1);
518 PMUL(2);
519 PMUL(3);
520 #undef PMUL
522 DT0 = d.d;
525 void helper_fmuld8sux16(void)
527 vis64 s, d;
528 uint32_t tmp;
530 s.d = DT0;
531 d.d = DT1;
533 #define PMUL(r) \
534 tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
535 if ((tmp & 0xff) > 0x7f) \
536 tmp += 0x100; \
537 d.VIS_L64(r) = tmp;
539 // Reverse calculation order to handle overlap
540 PMUL(1);
541 PMUL(0);
542 #undef PMUL
544 DT0 = d.d;
547 void helper_fmuld8ulx16(void)
549 vis64 s, d;
550 uint32_t tmp;
552 s.d = DT0;
553 d.d = DT1;
555 #define PMUL(r) \
556 tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
557 if ((tmp & 0xff) > 0x7f) \
558 tmp += 0x100; \
559 d.VIS_L64(r) = tmp;
561 // Reverse calculation order to handle overlap
562 PMUL(1);
563 PMUL(0);
564 #undef PMUL
566 DT0 = d.d;
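/* FEXPAND converts four unsigned 8-bit values into four 16-bit fixed-point
   values by shifting each byte left by four bits. */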
569 void helper_fexpand(void)
571 vis32 s;
572 vis64 d;
574 s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
575 d.d = DT1;
576 d.VIS_W64(0) = s.VIS_B32(0) << 4;
577 d.VIS_W64(1) = s.VIS_B32(1) << 4;
578 d.VIS_W64(2) = s.VIS_B32(2) << 4;
579 d.VIS_W64(3) = s.VIS_B32(3) << 4;
581 DT0 = d.d;
584 #define VIS_HELPER(name, F) \
585 void name##16(void) \
587 vis64 s, d; \
589 s.d = DT0; \
590 d.d = DT1; \
592 d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
593 d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
594 d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
595 d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
597 DT0 = d.d; \
600 void name##16s(void) \
602 vis32 s, d; \
604 s.f = FT0; \
605 d.f = FT1; \
607 d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
608 d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
610 FT0 = d.f; \
613 void name##32(void) \
615 vis64 s, d; \
617 s.d = DT0; \
618 d.d = DT1; \
620 d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
621 d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
623 DT0 = d.d; \
626 void name##32s(void) \
628 vis32 s, d; \
630 s.f = FT0; \
631 d.f = FT1; \
633 d.l = F(d.l, s.l); \
635 FT0 = d.f; \
638 #define FADD(a, b) ((a) + (b))
639 #define FSUB(a, b) ((a) - (b))
640 VIS_HELPER(helper_fpadd, FADD)
641 VIS_HELPER(helper_fpsub, FSUB)
643 #define VIS_CMPHELPER(name, F) \
644 void name##16(void) \
646 vis64 s, d; \
648 s.d = DT0; \
649 d.d = DT1; \
651 d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0; \
652 d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0; \
653 d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0; \
654 d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0; \
656 DT0 = d.d; \
659 void name##32(void) \
661 vis64 s, d; \
663 s.d = DT0; \
664 d.d = DT1; \
666 d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0; \
667 d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0; \
669 DT0 = d.d; \
672 #define FCMPGT(a, b) ((a) > (b))
673 #define FCMPEQ(a, b) ((a) == (b))
674 #define FCMPLE(a, b) ((a) <= (b))
675 #define FCMPNE(a, b) ((a) != (b))
677 VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
678 VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
679 VIS_CMPHELPER(helper_fcmple, FCMPLE)
680 VIS_CMPHELPER(helper_fcmpne, FCMPNE)
681 #endif
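/* FSR layout used below: cexc (current exception flags) in bits 4:0, aexc
   (accrued flags) in bits 9:5, TEM (trap enable mask) in bits 27:23.  An
   exception whose TEM bit is set raises an fp_exception trap; otherwise its
   cexc bit is ORed into aexc, hence the << 5 shift. */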
683 void helper_check_ieee_exceptions(void)
685 target_ulong status;
687 status = get_float_exception_flags(&env->fp_status);
688 if (status) {
689 /* Copy IEEE 754 flags into FSR */
690 if (status & float_flag_invalid)
691 env->fsr |= FSR_NVC;
692 if (status & float_flag_overflow)
693 env->fsr |= FSR_OFC;
694 if (status & float_flag_underflow)
695 env->fsr |= FSR_UFC;
696 if (status & float_flag_divbyzero)
697 env->fsr |= FSR_DZC;
698 if (status & float_flag_inexact)
699 env->fsr |= FSR_NXC;
701 if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
702 /* Unmasked exception, generate a trap */
703 env->fsr |= FSR_FTT_IEEE_EXCP;
704 raise_exception(TT_FP_EXCP);
705 } else {
706 /* Accumulate exceptions */
707 env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
712 void helper_clear_float_exceptions(void)
714 set_float_exception_flags(0, &env->fp_status);
717 void helper_fabss(void)
719 FT0 = float32_abs(FT1);
722 #ifdef TARGET_SPARC64
723 void helper_fabsd(void)
725 DT0 = float64_abs(DT1);
728 void helper_fabsq(void)
730 QT0 = float128_abs(QT1);
732 #endif
734 void helper_fsqrts(void)
736 FT0 = float32_sqrt(FT1, &env->fp_status);
739 void helper_fsqrtd(void)
741 DT0 = float64_sqrt(DT1, &env->fp_status);
744 void helper_fsqrtq(void)
746 QT0 = float128_sqrt(QT1, &env->fp_status);
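/* GEN_FCMP: FS is the bit offset of the condition-code field inside the FSR.
   fcc0 sits at bits 11:10, and the V9 fields fcc1/fcc2/fcc3 at bits 33:32,
   35:34 and 37:36 (hence the shifts by 22, 24 and 26 below).  A non-zero
   TRAP selects fcmpe* behaviour, which traps on unordered operands even when
   the invalid-operation trap is masked. */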
749 #define GEN_FCMP(name, size, reg1, reg2, FS, TRAP) \
750 void glue(helper_, name) (void) \
752 target_ulong new_fsr; \
754 env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
755 switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) { \
756 case float_relation_unordered: \
757 new_fsr = (FSR_FCC1 | FSR_FCC0) << FS; \
758 if ((env->fsr & FSR_NVM) || TRAP) { \
759 env->fsr |= new_fsr; \
760 env->fsr |= FSR_NVC; \
761 env->fsr |= FSR_FTT_IEEE_EXCP; \
762 raise_exception(TT_FP_EXCP); \
763 } else { \
764 env->fsr |= FSR_NVA; \
766 break; \
767 case float_relation_less: \
768 new_fsr = FSR_FCC0 << FS; \
769 break; \
770 case float_relation_greater: \
771 new_fsr = FSR_FCC1 << FS; \
772 break; \
773 default: \
774 new_fsr = 0; \
775 break; \
777 env->fsr |= new_fsr; \
780 GEN_FCMP(fcmps, float32, FT0, FT1, 0, 0);
781 GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);
783 GEN_FCMP(fcmpes, float32, FT0, FT1, 0, 1);
784 GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);
786 GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
787 GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
789 #ifdef TARGET_SPARC64
790 GEN_FCMP(fcmps_fcc1, float32, FT0, FT1, 22, 0);
791 GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
792 GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
794 GEN_FCMP(fcmps_fcc2, float32, FT0, FT1, 24, 0);
795 GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
796 GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
798 GEN_FCMP(fcmps_fcc3, float32, FT0, FT1, 26, 0);
799 GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
800 GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
802 GEN_FCMP(fcmpes_fcc1, float32, FT0, FT1, 22, 1);
803 GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
804 GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
806 GEN_FCMP(fcmpes_fcc2, float32, FT0, FT1, 24, 1);
807 GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
808 GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
810 GEN_FCMP(fcmpes_fcc3, float32, FT0, FT1, 26, 1);
811 GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
812 GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
813 #endif
815 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
816 defined(DEBUG_MXCC)
817 static void dump_mxcc(CPUState *env)
819 printf("mxccdata: %016llx %016llx %016llx %016llx\n",
820 env->mxccdata[0], env->mxccdata[1],
821 env->mxccdata[2], env->mxccdata[3]);
822 printf("mxccregs: %016llx %016llx %016llx %016llx\n"
823 " %016llx %016llx %016llx %016llx\n",
824 env->mxccregs[0], env->mxccregs[1],
825 env->mxccregs[2], env->mxccregs[3],
826 env->mxccregs[4], env->mxccregs[5],
827 env->mxccregs[6], env->mxccregs[7]);
829 #endif
831 #if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
832 && defined(DEBUG_ASI)
833 static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
834 uint64_t r1)
836 switch (size)
838 case 1:
839 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
840 addr, asi, r1 & 0xff);
841 break;
842 case 2:
843 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
844 addr, asi, r1 & 0xffff);
845 break;
846 case 4:
847 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
848 addr, asi, r1 & 0xffffffff);
849 break;
850 case 8:
851 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
852 addr, asi, r1);
853 break;
856 #endif
858 #ifndef TARGET_SPARC64
859 #ifndef CONFIG_USER_ONLY
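/* SPARC32 ASI decoding (SuperSPARC/TurboSPARC style): ASI 2 addresses the
   MXCC cache controller registers, 3 the MMU flush/probe interface, 4 the
   MMU registers, 9/0xa/0xb supervisor code, user data and supervisor data,
   and 0x20-0x2f bypass the MMU to physical memory (the low four ASI bits
   supplying physical address bits 35:32). */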
860 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
862 uint64_t ret = 0;
863 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
864 uint32_t last_addr = addr;
865 #endif
867 helper_check_align(addr, size - 1);
868 switch (asi) {
869 case 2: /* SuperSparc MXCC registers */
870 switch (addr) {
871 case 0x01c00a00: /* MXCC control register */
872 if (size == 8)
873 ret = env->mxccregs[3];
874 else
875 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
876 size);
877 break;
878 case 0x01c00a04: /* MXCC control register */
879 if (size == 4)
880 ret = env->mxccregs[3];
881 else
882 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
883 size);
884 break;
885 case 0x01c00c00: /* Module reset register */
886 if (size == 8) {
887 ret = env->mxccregs[5];
888 // should we do something here?
889 } else
890 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
891 size);
892 break;
893 case 0x01c00f00: /* MBus port address register */
894 if (size == 8)
895 ret = env->mxccregs[7];
896 else
897 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
898 size);
899 break;
900 default:
901 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
902 size);
903 break;
905 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
906 "addr = %08x -> ret = %08x,"
907 "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
908 #ifdef DEBUG_MXCC
909 dump_mxcc(env);
910 #endif
911 break;
912 case 3: /* MMU probe */
914 int mmulev;
916 mmulev = (addr >> 8) & 15;
917 if (mmulev > 4)
918 ret = 0;
919 else
920 ret = mmu_probe(env, addr, mmulev);
921 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
922 addr, mmulev, ret);
924 break;
925 case 4: /* read MMU regs */
927 int reg = (addr >> 8) & 0x1f;
929 ret = env->mmuregs[reg];
930 if (reg == 3) /* Fault status cleared on read */
931 env->mmuregs[3] = 0;
932 else if (reg == 0x13) /* Fault status read */
933 ret = env->mmuregs[3];
934 else if (reg == 0x14) /* Fault address read */
935 ret = env->mmuregs[4];
936 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
938 break;
939 case 5: // Turbosparc ITLB Diagnostic
940 case 6: // Turbosparc DTLB Diagnostic
941 case 7: // Turbosparc IOTLB Diagnostic
942 break;
943 case 9: /* Supervisor code access */
944 switch(size) {
945 case 1:
946 ret = ldub_code(addr);
947 break;
948 case 2:
949 ret = lduw_code(addr);
950 break;
951 default:
952 case 4:
953 ret = ldl_code(addr);
954 break;
955 case 8:
956 ret = ldq_code(addr);
957 break;
959 break;
960 case 0xa: /* User data access */
961 switch(size) {
962 case 1:
963 ret = ldub_user(addr);
964 break;
965 case 2:
966 ret = lduw_user(addr);
967 break;
968 default:
969 case 4:
970 ret = ldl_user(addr);
971 break;
972 case 8:
973 ret = ldq_user(addr);
974 break;
976 break;
977 case 0xb: /* Supervisor data access */
978 switch(size) {
979 case 1:
980 ret = ldub_kernel(addr);
981 break;
982 case 2:
983 ret = lduw_kernel(addr);
984 break;
985 default:
986 case 4:
987 ret = ldl_kernel(addr);
988 break;
989 case 8:
990 ret = ldq_kernel(addr);
991 break;
993 break;
994 case 0xc: /* I-cache tag */
995 case 0xd: /* I-cache data */
996 case 0xe: /* D-cache tag */
997 case 0xf: /* D-cache data */
998 break;
999 case 0x20: /* MMU passthrough */
1000 switch(size) {
1001 case 1:
1002 ret = ldub_phys(addr);
1003 break;
1004 case 2:
1005 ret = lduw_phys(addr);
1006 break;
1007 default:
1008 case 4:
1009 ret = ldl_phys(addr);
1010 break;
1011 case 8:
1012 ret = ldq_phys(addr);
1013 break;
1015 break;
1016 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1017 switch(size) {
1018 case 1:
1019 ret = ldub_phys((target_phys_addr_t)addr
1020 | ((target_phys_addr_t)(asi & 0xf) << 32));
1021 break;
1022 case 2:
1023 ret = lduw_phys((target_phys_addr_t)addr
1024 | ((target_phys_addr_t)(asi & 0xf) << 32));
1025 break;
1026 default:
1027 case 4:
1028 ret = ldl_phys((target_phys_addr_t)addr
1029 | ((target_phys_addr_t)(asi & 0xf) << 32));
1030 break;
1031 case 8:
1032 ret = ldq_phys((target_phys_addr_t)addr
1033 | ((target_phys_addr_t)(asi & 0xf) << 32));
1034 break;
1036 break;
1037 case 0x30: // Turbosparc secondary cache diagnostic
1038 case 0x31: // Turbosparc RAM snoop
1039 case 0x32: // Turbosparc page table descriptor diagnostic
1040 case 0x39: /* data cache diagnostic register */
1041 ret = 0;
1042 break;
1043 case 8: /* User code access, XXX */
1044 default:
1045 do_unassigned_access(addr, 0, 0, asi);
1046 ret = 0;
1047 break;
1049 if (sign) {
1050 switch(size) {
1051 case 1:
1052 ret = (int8_t) ret;
1053 break;
1054 case 2:
1055 ret = (int16_t) ret;
1056 break;
1057 case 4:
1058 ret = (int32_t) ret;
1059 break;
1060 default:
1061 break;
1064 #ifdef DEBUG_ASI
1065 dump_asi("read ", last_addr, asi, size, ret);
1066 #endif
1067 return ret;
1070 void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
1072 helper_check_align(addr, size - 1);
1073 switch(asi) {
1074 case 2: /* SuperSparc MXCC registers */
1075 switch (addr) {
1076 case 0x01c00000: /* MXCC stream data register 0 */
1077 if (size == 8)
1078 env->mxccdata[0] = val;
1079 else
1080 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1081 size);
1082 break;
1083 case 0x01c00008: /* MXCC stream data register 1 */
1084 if (size == 8)
1085 env->mxccdata[1] = val;
1086 else
1087 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1088 size);
1089 break;
1090 case 0x01c00010: /* MXCC stream data register 2 */
1091 if (size == 8)
1092 env->mxccdata[2] = val;
1093 else
1094 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1095 size);
1096 break;
1097 case 0x01c00018: /* MXCC stream data register 3 */
1098 if (size == 8)
1099 env->mxccdata[3] = val;
1100 else
1101 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1102 size);
1103 break;
1104 case 0x01c00100: /* MXCC stream source */
1105 if (size == 8)
1106 env->mxccregs[0] = val;
1107 else
1108 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1109 size);
1110 env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1111 0);
1112 env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1113 8);
1114 env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1115 16);
1116 env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1117 24);
1118 break;
1119 case 0x01c00200: /* MXCC stream destination */
1120 if (size == 8)
1121 env->mxccregs[1] = val;
1122 else
1123 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1124 size);
1125 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0,
1126 env->mxccdata[0]);
1127 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8,
1128 env->mxccdata[1]);
1129 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
1130 env->mxccdata[2]);
1131 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
1132 env->mxccdata[3]);
1133 break;
1134 case 0x01c00a00: /* MXCC control register */
1135 if (size == 8)
1136 env->mxccregs[3] = val;
1137 else
1138 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1139 size);
1140 break;
1141 case 0x01c00a04: /* MXCC control register */
1142 if (size == 4)
1143 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
1144 | val;
1145 else
1146 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1147 size);
1148 break;
1149 case 0x01c00e00: /* MXCC error register */
1150 // writing a 1 bit clears the error
1151 if (size == 8)
1152 env->mxccregs[6] &= ~val;
1153 else
1154 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1155 size);
1156 break;
1157 case 0x01c00f00: /* MBus port address register */
1158 if (size == 8)
1159 env->mxccregs[7] = val;
1160 else
1161 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1162 size);
1163 break;
1164 default:
1165 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
1166 size);
1167 break;
1169 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %08x\n", asi,
1170 size, addr, val);
1171 #ifdef DEBUG_MXCC
1172 dump_mxcc(env);
1173 #endif
1174 break;
1175 case 3: /* MMU flush */
1177 int mmulev;
1179 mmulev = (addr >> 8) & 15;
1180 DPRINTF_MMU("mmu flush level %d\n", mmulev);
1181 switch (mmulev) {
1182 case 0: // flush page
1183 tlb_flush_page(env, addr & 0xfffff000);
1184 break;
1185 case 1: // flush segment (256k)
1186 case 2: // flush region (16M)
1187 case 3: // flush context (4G)
1188 case 4: // flush entire
1189 tlb_flush(env, 1);
1190 break;
1191 default:
1192 break;
1194 #ifdef DEBUG_MMU
1195 dump_mmu(env);
1196 #endif
1198 break;
1199 case 4: /* write MMU regs */
1201 int reg = (addr >> 8) & 0x1f;
1202 uint32_t oldreg;
1204 oldreg = env->mmuregs[reg];
1205 switch(reg) {
1206 case 0: // Control Register
1207 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
1208 (val & 0x00ffffff);
1209 // Mappings generated during no-fault mode or MMU
1210 // disabled mode are invalid in normal mode
1211 if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
1212 (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
1213 tlb_flush(env, 1);
1214 break;
1215 case 1: // Context Table Pointer Register
1216 env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
1217 break;
1218 case 2: // Context Register
1219 env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
1220 if (oldreg != env->mmuregs[reg]) {
1221 /* we flush when the MMU context changes because
1222 QEMU has no MMU context support */
1223 tlb_flush(env, 1);
1225 break;
1226 case 3: // Synchronous Fault Status Register with Clear
1227 case 4: // Synchronous Fault Address Register
1228 break;
1229 case 0x10: // TLB Replacement Control Register
1230 env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
1231 break;
1232 case 0x13: // Synchronous Fault Status Register with Read and Clear
1233 env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
1234 break;
1235 case 0x14: // Synchronous Fault Address Register
1236 env->mmuregs[4] = val;
1237 break;
1238 default:
1239 env->mmuregs[reg] = val;
1240 break;
1242 if (oldreg != env->mmuregs[reg]) {
1243 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
1244 reg, oldreg, env->mmuregs[reg]);
1246 #ifdef DEBUG_MMU
1247 dump_mmu(env);
1248 #endif
1250 break;
1251 case 5: // Turbosparc ITLB Diagnostic
1252 case 6: // Turbosparc DTLB Diagnostic
1253 case 7: // Turbosparc IOTLB Diagnostic
1254 break;
1255 case 0xa: /* User data access */
1256 switch(size) {
1257 case 1:
1258 stb_user(addr, val);
1259 break;
1260 case 2:
1261 stw_user(addr, val);
1262 break;
1263 default:
1264 case 4:
1265 stl_user(addr, val);
1266 break;
1267 case 8:
1268 stq_user(addr, val);
1269 break;
1271 break;
1272 case 0xb: /* Supervisor data access */
1273 switch(size) {
1274 case 1:
1275 stb_kernel(addr, val);
1276 break;
1277 case 2:
1278 stw_kernel(addr, val);
1279 break;
1280 default:
1281 case 4:
1282 stl_kernel(addr, val);
1283 break;
1284 case 8:
1285 stq_kernel(addr, val);
1286 break;
1288 break;
1289 case 0xc: /* I-cache tag */
1290 case 0xd: /* I-cache data */
1291 case 0xe: /* D-cache tag */
1292 case 0xf: /* D-cache data */
1293 case 0x10: /* I/D-cache flush page */
1294 case 0x11: /* I/D-cache flush segment */
1295 case 0x12: /* I/D-cache flush region */
1296 case 0x13: /* I/D-cache flush context */
1297 case 0x14: /* I/D-cache flush user */
1298 break;
1299 case 0x17: /* Block copy, sta access */
1301 // val = src
1302 // addr = dst
1303 // copy 32 bytes
1304 unsigned int i;
1305 uint32_t src = val & ~3, dst = addr & ~3, temp;
1307 for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
1308 temp = ldl_kernel(src);
1309 stl_kernel(dst, temp);
1312 break;
1313 case 0x1f: /* Block fill, stda access */
1315 // addr = dst
1316 // fill 32 bytes with val
1317 unsigned int i;
1318 uint32_t dst = addr & ~7;
1320 for (i = 0; i < 32; i += 8, dst += 8)
1321 stq_kernel(dst, val);
1323 break;
1324 case 0x20: /* MMU passthrough */
1326 switch(size) {
1327 case 1:
1328 stb_phys(addr, val);
1329 break;
1330 case 2:
1331 stw_phys(addr, val);
1332 break;
1333 case 4:
1334 default:
1335 stl_phys(addr, val);
1336 break;
1337 case 8:
1338 stq_phys(addr, val);
1339 break;
1342 break;
1343 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1345 switch(size) {
1346 case 1:
1347 stb_phys((target_phys_addr_t)addr
1348 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1349 break;
1350 case 2:
1351 stw_phys((target_phys_addr_t)addr
1352 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1353 break;
1354 case 4:
1355 default:
1356 stl_phys((target_phys_addr_t)addr
1357 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1358 break;
1359 case 8:
1360 stq_phys((target_phys_addr_t)addr
1361 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1362 break;
1365 break;
1366 case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
1367 case 0x31: // store buffer data, Ross RT620 I-cache flush or
1368 // Turbosparc snoop RAM
1369 case 0x32: // store buffer control or Turbosparc page table
1370 // descriptor diagnostic
1371 case 0x36: /* I-cache flash clear */
1372 case 0x37: /* D-cache flash clear */
1373 case 0x38: /* breakpoint diagnostics */
1374 case 0x4c: /* breakpoint action */
1375 break;
1376 case 8: /* User code access, XXX */
1377 case 9: /* Supervisor code access, XXX */
1378 default:
1379 do_unassigned_access(addr, 1, 0, asi);
1380 break;
1382 #ifdef DEBUG_ASI
1383 dump_asi("write", addr, asi, size, val);
1384 #endif
1387 #endif /* CONFIG_USER_ONLY */
1388 #else /* TARGET_SPARC64 */
1390 #ifdef CONFIG_USER_ONLY
1391 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1393 uint64_t ret = 0;
1394 #if defined(DEBUG_ASI)
1395 target_ulong last_addr = addr;
1396 #endif
1398 if (asi < 0x80)
1399 raise_exception(TT_PRIV_ACT);
1401 helper_check_align(addr, size - 1);
1402 address_mask(env, &addr);
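/* No-fault ASIs (0x82/0x8a primary, 0x83/0x8b secondary) must never raise a
   data access exception: a load from an unmapped address simply returns
   zero.  In the user-only build this is approximated by checking the page's
   read permission with page_check_range() before falling through to the
   normal load path. */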
1404 switch (asi) {
1405 case 0x82: // Primary no-fault
1406 case 0x8a: // Primary no-fault LE
1407 if (page_check_range(addr, size, PAGE_READ) == -1) {
1408 #ifdef DEBUG_ASI
1409 dump_asi("read ", last_addr, asi, size, ret);
1410 #endif
1411 return 0;
1413 // Fall through
1414 case 0x80: // Primary
1415 case 0x88: // Primary LE
1417 switch(size) {
1418 case 1:
1419 ret = ldub_raw(addr);
1420 break;
1421 case 2:
1422 ret = lduw_raw(addr);
1423 break;
1424 case 4:
1425 ret = ldl_raw(addr);
1426 break;
1427 default:
1428 case 8:
1429 ret = ldq_raw(addr);
1430 break;
1433 break;
1434 case 0x83: // Secondary no-fault
1435 case 0x8b: // Secondary no-fault LE
1436 if (page_check_range(addr, size, PAGE_READ) == -1) {
1437 #ifdef DEBUG_ASI
1438 dump_asi("read ", last_addr, asi, size, ret);
1439 #endif
1440 return 0;
1442 // Fall through
1443 case 0x81: // Secondary
1444 case 0x89: // Secondary LE
1445 // XXX
1446 break;
1447 default:
1448 break;
1451 /* Convert from little endian */
1452 switch (asi) {
1453 case 0x88: // Primary LE
1454 case 0x89: // Secondary LE
1455 case 0x8a: // Primary no-fault LE
1456 case 0x8b: // Secondary no-fault LE
1457 switch(size) {
1458 case 2:
1459 ret = bswap16(ret);
1460 break;
1461 case 4:
1462 ret = bswap32(ret);
1463 break;
1464 case 8:
1465 ret = bswap64(ret);
1466 break;
1467 default:
1468 break;
1470 default:
1471 break;
1474 /* Convert to signed number */
1475 if (sign) {
1476 switch(size) {
1477 case 1:
1478 ret = (int8_t) ret;
1479 break;
1480 case 2:
1481 ret = (int16_t) ret;
1482 break;
1483 case 4:
1484 ret = (int32_t) ret;
1485 break;
1486 default:
1487 break;
1490 #ifdef DEBUG_ASI
1491 dump_asi("read ", last_addr, asi, size, ret);
1492 #endif
1493 return ret;
1496 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1498 #ifdef DEBUG_ASI
1499 dump_asi("write", addr, asi, size, val);
1500 #endif
1501 if (asi < 0x80)
1502 raise_exception(TT_PRIV_ACT);
1504 helper_check_align(addr, size - 1);
1505 address_mask(env, &addr);
1507 /* Convert to little endian */
1508 switch (asi) {
1509 case 0x88: // Primary LE
1510 case 0x89: // Secondary LE
1511 switch(size) {
1512 case 2:
1513 val = bswap16(val);
1514 break;
1515 case 4:
1516 val = bswap32(val);
1517 break;
1518 case 8:
1519 val = bswap64(val);
1520 break;
1521 default:
1522 break;
1524 default:
1525 break;
1528 switch(asi) {
1529 case 0x80: // Primary
1530 case 0x88: // Primary LE
1532 switch(size) {
1533 case 1:
1534 stb_raw(addr, val);
1535 break;
1536 case 2:
1537 stw_raw(addr, val);
1538 break;
1539 case 4:
1540 stl_raw(addr, val);
1541 break;
1542 case 8:
1543 default:
1544 stq_raw(addr, val);
1545 break;
1548 break;
1549 case 0x81: // Secondary
1550 case 0x89: // Secondary LE
1551 // XXX
1552 return;
1554 case 0x82: // Primary no-fault, RO
1555 case 0x83: // Secondary no-fault, RO
1556 case 0x8a: // Primary no-fault LE, RO
1557 case 0x8b: // Secondary no-fault LE, RO
1558 default:
1559 do_unassigned_access(addr, 1, 0, 1);
1560 return;
1564 #else /* CONFIG_USER_ONLY */
1566 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1568 uint64_t ret = 0;
1569 #if defined(DEBUG_ASI)
1570 target_ulong last_addr = addr;
1571 #endif
1573 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1574 || ((env->def->features & CPU_FEATURE_HYPV)
1575 && asi >= 0x30 && asi < 0x80
1576 && !(env->hpstate & HS_PRIV)))
1577 raise_exception(TT_PRIV_ACT);
1579 helper_check_align(addr, size - 1);
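/* For the system emulator the no-fault ASIs probe the current MMU
   translation with cpu_get_phys_page_debug(): if the address has no valid
   mapping the load returns zero instead of trapping, otherwise it falls
   through to the normal access path. */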
1580 switch (asi) {
1581 case 0x82: // Primary no-fault
1582 case 0x8a: // Primary no-fault LE
1583 if (cpu_get_phys_page_debug(env, addr) == -1ULL) {
1584 #ifdef DEBUG_ASI
1585 dump_asi("read ", last_addr, asi, size, ret);
1586 #endif
1587 return 0;
1589 // Fall through
1590 case 0x10: // As if user primary
1591 case 0x18: // As if user primary LE
1592 case 0x80: // Primary
1593 case 0x88: // Primary LE
1594 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1595 if ((env->def->features & CPU_FEATURE_HYPV)
1596 && env->hpstate & HS_PRIV) {
1597 switch(size) {
1598 case 1:
1599 ret = ldub_hypv(addr);
1600 break;
1601 case 2:
1602 ret = lduw_hypv(addr);
1603 break;
1604 case 4:
1605 ret = ldl_hypv(addr);
1606 break;
1607 default:
1608 case 8:
1609 ret = ldq_hypv(addr);
1610 break;
1612 } else {
1613 switch(size) {
1614 case 1:
1615 ret = ldub_kernel(addr);
1616 break;
1617 case 2:
1618 ret = lduw_kernel(addr);
1619 break;
1620 case 4:
1621 ret = ldl_kernel(addr);
1622 break;
1623 default:
1624 case 8:
1625 ret = ldq_kernel(addr);
1626 break;
1629 } else {
1630 switch(size) {
1631 case 1:
1632 ret = ldub_user(addr);
1633 break;
1634 case 2:
1635 ret = lduw_user(addr);
1636 break;
1637 case 4:
1638 ret = ldl_user(addr);
1639 break;
1640 default:
1641 case 8:
1642 ret = ldq_user(addr);
1643 break;
1646 break;
1647 case 0x14: // Bypass
1648 case 0x15: // Bypass, non-cacheable
1649 case 0x1c: // Bypass LE
1650 case 0x1d: // Bypass, non-cacheable LE
1652 switch(size) {
1653 case 1:
1654 ret = ldub_phys(addr);
1655 break;
1656 case 2:
1657 ret = lduw_phys(addr);
1658 break;
1659 case 4:
1660 ret = ldl_phys(addr);
1661 break;
1662 default:
1663 case 8:
1664 ret = ldq_phys(addr);
1665 break;
1667 break;
1669 case 0x24: // Nucleus quad LDD 128 bit atomic
1670 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
1671 // Only ldda allowed
1672 raise_exception(TT_ILL_INSN);
1673 return 0;
1674 case 0x83: // Secondary no-fault
1675 case 0x8b: // Secondary no-fault LE
1676 if (cpu_get_phys_page_debug(env, addr) == -1ULL) {
1677 #ifdef DEBUG_ASI
1678 dump_asi("read ", last_addr, asi, size, ret);
1679 #endif
1680 return 0;
1682 // Fall through
1683 case 0x04: // Nucleus
1684 case 0x0c: // Nucleus Little Endian (LE)
1685 case 0x11: // As if user secondary
1686 case 0x19: // As if user secondary LE
1687 case 0x4a: // UPA config
1688 case 0x81: // Secondary
1689 case 0x89: // Secondary LE
1690 // XXX
1691 break;
1692 case 0x45: // LSU
1693 ret = env->lsu;
1694 break;
1695 case 0x50: // I-MMU regs
1697 int reg = (addr >> 3) & 0xf;
1699 ret = env->immuregs[reg];
1700 break;
1702 case 0x51: // I-MMU 8k TSB pointer
1703 case 0x52: // I-MMU 64k TSB pointer
1704 // XXX
1705 break;
1706 case 0x55: // I-MMU data access
1708 int reg = (addr >> 3) & 0x3f;
1710 ret = env->itlb_tte[reg];
1711 break;
1713 case 0x56: // I-MMU tag read
1715 int reg = (addr >> 3) & 0x3f;
1717 ret = env->itlb_tag[reg];
1718 break;
1720 case 0x58: // D-MMU regs
1722 int reg = (addr >> 3) & 0xf;
1724 ret = env->dmmuregs[reg];
1725 break;
1727 case 0x5d: // D-MMU data access
1729 int reg = (addr >> 3) & 0x3f;
1731 ret = env->dtlb_tte[reg];
1732 break;
1734 case 0x5e: // D-MMU tag read
1736 int reg = (addr >> 3) & 0x3f;
1738 ret = env->dtlb_tag[reg];
1739 break;
1741 case 0x46: // D-cache data
1742 case 0x47: // D-cache tag access
1743 case 0x4b: // E-cache error enable
1744 case 0x4c: // E-cache asynchronous fault status
1745 case 0x4d: // E-cache asynchronous fault address
1746 case 0x4e: // E-cache tag data
1747 case 0x66: // I-cache instruction access
1748 case 0x67: // I-cache tag access
1749 case 0x6e: // I-cache predecode
1750 case 0x6f: // I-cache LRU etc.
1751 case 0x76: // E-cache tag
1752 case 0x7e: // E-cache tag
1753 break;
1754 case 0x59: // D-MMU 8k TSB pointer
1755 case 0x5a: // D-MMU 64k TSB pointer
1756 case 0x5b: // D-MMU data pointer
1757 case 0x48: // Interrupt dispatch, RO
1758 case 0x49: // Interrupt data receive
1759 case 0x7f: // Incoming interrupt vector, RO
1760 // XXX
1761 break;
1762 case 0x54: // I-MMU data in, WO
1763 case 0x57: // I-MMU demap, WO
1764 case 0x5c: // D-MMU data in, WO
1765 case 0x5f: // D-MMU demap, WO
1766 case 0x77: // Interrupt vector, WO
1767 default:
1768 do_unassigned_access(addr, 0, 0, 1);
1769 ret = 0;
1770 break;
1773 /* Convert from little endian */
1774 switch (asi) {
1775 case 0x0c: // Nucleus Little Endian (LE)
1776 case 0x18: // As if user primary LE
1777 case 0x19: // As if user secondary LE
1778 case 0x1c: // Bypass LE
1779 case 0x1d: // Bypass, non-cacheable LE
1780 case 0x88: // Primary LE
1781 case 0x89: // Secondary LE
1782 case 0x8a: // Primary no-fault LE
1783 case 0x8b: // Secondary no-fault LE
1784 switch(size) {
1785 case 2:
1786 ret = bswap16(ret);
1787 break;
1788 case 4:
1789 ret = bswap32(ret);
1790 break;
1791 case 8:
1792 ret = bswap64(ret);
1793 break;
1794 default:
1795 break;
1797 default:
1798 break;
1801 /* Convert to signed number */
1802 if (sign) {
1803 switch(size) {
1804 case 1:
1805 ret = (int8_t) ret;
1806 break;
1807 case 2:
1808 ret = (int16_t) ret;
1809 break;
1810 case 4:
1811 ret = (int32_t) ret;
1812 break;
1813 default:
1814 break;
1817 #ifdef DEBUG_ASI
1818 dump_asi("read ", last_addr, asi, size, ret);
1819 #endif
1820 return ret;
1823 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1825 #ifdef DEBUG_ASI
1826 dump_asi("write", addr, asi, size, val);
1827 #endif
1828 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1829 || ((env->def->features & CPU_FEATURE_HYPV)
1830 && asi >= 0x30 && asi < 0x80
1831 && !(env->hpstate & HS_PRIV)))
1832 raise_exception(TT_PRIV_ACT);
1834 helper_check_align(addr, size - 1);
1835 /* Convert to little endian */
1836 switch (asi) {
1837 case 0x0c: // Nucleus Little Endian (LE)
1838 case 0x18: // As if user primary LE
1839 case 0x19: // As if user secondary LE
1840 case 0x1c: // Bypass LE
1841 case 0x1d: // Bypass, non-cacheable LE
1842 case 0x88: // Primary LE
1843 case 0x89: // Secondary LE
1844 switch(size) {
1845 case 2:
1846 val = bswap16(val);
1847 break;
1848 case 4:
1849 val = bswap32(val);
1850 break;
1851 case 8:
1852 val = bswap64(val);
1853 break;
1854 default:
1855 break;
1857 default:
1858 break;
1861 switch(asi) {
1862 case 0x10: // As if user primary
1863 case 0x18: // As if user primary LE
1864 case 0x80: // Primary
1865 case 0x88: // Primary LE
1866 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1867 if ((env->def->features & CPU_FEATURE_HYPV)
1868 && env->hpstate & HS_PRIV) {
1869 switch(size) {
1870 case 1:
1871 stb_hypv(addr, val);
1872 break;
1873 case 2:
1874 stw_hypv(addr, val);
1875 break;
1876 case 4:
1877 stl_hypv(addr, val);
1878 break;
1879 case 8:
1880 default:
1881 stq_hypv(addr, val);
1882 break;
1884 } else {
1885 switch(size) {
1886 case 1:
1887 stb_kernel(addr, val);
1888 break;
1889 case 2:
1890 stw_kernel(addr, val);
1891 break;
1892 case 4:
1893 stl_kernel(addr, val);
1894 break;
1895 case 8:
1896 default:
1897 stq_kernel(addr, val);
1898 break;
1901 } else {
1902 switch(size) {
1903 case 1:
1904 stb_user(addr, val);
1905 break;
1906 case 2:
1907 stw_user(addr, val);
1908 break;
1909 case 4:
1910 stl_user(addr, val);
1911 break;
1912 case 8:
1913 default:
1914 stq_user(addr, val);
1915 break;
1918 break;
1919 case 0x14: // Bypass
1920 case 0x15: // Bypass, non-cacheable
1921 case 0x1c: // Bypass LE
1922 case 0x1d: // Bypass, non-cacheable LE
1924 switch(size) {
1925 case 1:
1926 stb_phys(addr, val);
1927 break;
1928 case 2:
1929 stw_phys(addr, val);
1930 break;
1931 case 4:
1932 stl_phys(addr, val);
1933 break;
1934 case 8:
1935 default:
1936 stq_phys(addr, val);
1937 break;
1940 return;
1941 case 0x24: // Nucleus quad LDD 128 bit atomic
1942 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
1943 // Only ldda allowed
1944 raise_exception(TT_ILL_INSN);
1945 return;
1946 case 0x04: // Nucleus
1947 case 0x0c: // Nucleus Little Endian (LE)
1948 case 0x11: // As if user secondary
1949 case 0x19: // As if user secondary LE
1950 case 0x4a: // UPA config
1951 case 0x81: // Secondary
1952 case 0x89: // Secondary LE
1953 // XXX
1954 return;
1955 case 0x45: // LSU
1957 uint64_t oldreg;
1959 oldreg = env->lsu;
1960 env->lsu = val & (DMMU_E | IMMU_E);
1961 // Mappings generated during D/I MMU disabled mode are
1962 // invalid in normal mode
1963 if (oldreg != env->lsu) {
1964 DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
1965 oldreg, env->lsu);
1966 #ifdef DEBUG_MMU
1967 dump_mmu(env);
1968 #endif
1969 tlb_flush(env, 1);
1971 return;
1973 case 0x50: // I-MMU regs
1975 int reg = (addr >> 3) & 0xf;
1976 uint64_t oldreg;
1978 oldreg = env->immuregs[reg];
1979 switch(reg) {
1980 case 0: // RO
1981 case 4:
1982 return;
1983 case 1: // Not in I-MMU
1984 case 2:
1985 case 7:
1986 case 8:
1987 return;
1988 case 3: // SFSR
1989 if ((val & 1) == 0)
1990 val = 0; // Clear SFSR
1991 break;
1992 case 5: // TSB access
1993 case 6: // Tag access
1994 default:
1995 break;
1997 env->immuregs[reg] = val;
1998 if (oldreg != env->immuregs[reg]) {
1999 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
2000 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
2002 #ifdef DEBUG_MMU
2003 dump_mmu(env);
2004 #endif
2005 return;
2007 case 0x54: // I-MMU data in
2009 unsigned int i;
2011 // Try finding an invalid entry
2012 for (i = 0; i < 64; i++) {
2013 if ((env->itlb_tte[i] & 0x8000000000000000ULL) == 0) {
2014 env->itlb_tag[i] = env->immuregs[6];
2015 env->itlb_tte[i] = val;
2016 return;
2019 // Try finding an unlocked entry
2020 for (i = 0; i < 64; i++) {
2021 if ((env->itlb_tte[i] & 0x40) == 0) {
2022 env->itlb_tag[i] = env->immuregs[6];
2023 env->itlb_tte[i] = val;
2024 return;
2027 // error state?
2028 return;
2030 case 0x55: // I-MMU data access
2032 unsigned int i = (addr >> 3) & 0x3f;
2034 env->itlb_tag[i] = env->immuregs[6];
2035 env->itlb_tte[i] = val;
2036 return;
2038 case 0x57: // I-MMU demap
2039 // XXX
2040 return;
2041 case 0x58: // D-MMU regs
2043 int reg = (addr >> 3) & 0xf;
2044 uint64_t oldreg;
2046 oldreg = env->dmmuregs[reg];
2047 switch(reg) {
2048 case 0: // RO
2049 case 4:
2050 return;
2051 case 3: // SFSR
2052 if ((val & 1) == 0) {
2053 val = 0; // Clear SFSR, Fault address
2054 env->dmmuregs[4] = 0;
2056 env->dmmuregs[reg] = val;
2057 break;
2058 case 1: // Primary context
2059 case 2: // Secondary context
2060 case 5: // TSB access
2061 case 6: // Tag access
2062 case 7: // Virtual Watchpoint
2063 case 8: // Physical Watchpoint
2064 default:
2065 break;
2067 env->dmmuregs[reg] = val;
2068 if (oldreg != env->dmmuregs[reg]) {
2069 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
2070 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
2072 #ifdef DEBUG_MMU
2073 dump_mmu(env);
2074 #endif
2075 return;
2077 case 0x5c: // D-MMU data in
2079 unsigned int i;
2081 // Try finding an invalid entry
2082 for (i = 0; i < 64; i++) {
2083 if ((env->dtlb_tte[i] & 0x8000000000000000ULL) == 0) {
2084 env->dtlb_tag[i] = env->dmmuregs[6];
2085 env->dtlb_tte[i] = val;
2086 return;
2089 // Try finding an unlocked entry
2090 for (i = 0; i < 64; i++) {
2091 if ((env->dtlb_tte[i] & 0x40) == 0) {
2092 env->dtlb_tag[i] = env->dmmuregs[6];
2093 env->dtlb_tte[i] = val;
2094 return;
2097 // error state?
2098 return;
2100 case 0x5d: // D-MMU data access
2102 unsigned int i = (addr >> 3) & 0x3f;
2104 env->dtlb_tag[i] = env->dmmuregs[6];
2105 env->dtlb_tte[i] = val;
2106 return;
2108 case 0x5f: // D-MMU demap
2109 case 0x49: // Interrupt data receive
2110 // XXX
2111 return;
2112 case 0x46: // D-cache data
2113 case 0x47: // D-cache tag access
2114 case 0x4b: // E-cache error enable
2115 case 0x4c: // E-cache asynchronous fault status
2116 case 0x4d: // E-cache asynchronous fault address
2117 case 0x4e: // E-cache tag data
2118 case 0x66: // I-cache instruction access
2119 case 0x67: // I-cache tag access
2120 case 0x6e: // I-cache predecode
2121 case 0x6f: // I-cache LRU etc.
2122 case 0x76: // E-cache tag
2123 case 0x7e: // E-cache tag
2124 return;
2125 case 0x51: // I-MMU 8k TSB pointer, RO
2126 case 0x52: // I-MMU 64k TSB pointer, RO
2127 case 0x56: // I-MMU tag read, RO
2128 case 0x59: // D-MMU 8k TSB pointer, RO
2129 case 0x5a: // D-MMU 64k TSB pointer, RO
2130 case 0x5b: // D-MMU data pointer, RO
2131 case 0x5e: // D-MMU tag read, RO
2132 case 0x48: // Interrupt dispatch, RO
2133 case 0x7f: // Incoming interrupt vector, RO
2134 case 0x82: // Primary no-fault, RO
2135 case 0x83: // Secondary no-fault, RO
2136 case 0x8a: // Primary no-fault LE, RO
2137 case 0x8b: // Secondary no-fault LE, RO
2138 default:
2139 do_unassigned_access(addr, 1, 0, 1);
2140 return;
2143 #endif /* CONFIG_USER_ONLY */
2145 void helper_ldda_asi(target_ulong addr, int asi, int rd)
2147 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2148 || ((env->def->features & CPU_FEATURE_HYPV)
2149 && asi >= 0x30 && asi < 0x80
2150 && !(env->hpstate & HS_PRIV)))
2151 raise_exception(TT_PRIV_ACT);
2153 switch (asi) {
2154 case 0x24: // Nucleus quad LDD 128 bit atomic
2155 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
2156 helper_check_align(addr, 0xf);
2157 if (rd == 0) {
2158 env->gregs[1] = ldq_kernel(addr + 8);
2159 if (asi == 0x2c)
2160 bswap64s(&env->gregs[1]);
2161 } else if (rd < 8) {
2162 env->gregs[rd] = ldq_kernel(addr);
2163 env->gregs[rd + 1] = ldq_kernel(addr + 8);
2164 if (asi == 0x2c) {
2165 bswap64s(&env->gregs[rd]);
2166 bswap64s(&env->gregs[rd + 1]);
2168 } else {
2169 env->regwptr[rd] = ldq_kernel(addr);
2170 env->regwptr[rd + 1] = ldq_kernel(addr + 8);
2171 if (asi == 0x2c) {
2172 bswap64s(&env->regwptr[rd]);
2173 bswap64s(&env->regwptr[rd + 1]);
2176 break;
2177 default:
2178 helper_check_align(addr, 0x3);
2179 if (rd == 0)
2180 env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
2181 else if (rd < 8) {
2182 env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
2183 env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
2184 } else {
2185 env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
2186 env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
2188 break;
2192 void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
2194 unsigned int i;
2195 target_ulong val;
2197 helper_check_align(addr, 3);
2198 switch (asi) {
2199 case 0xf0: // Block load primary
2200 case 0xf1: // Block load secondary
2201 case 0xf8: // Block load primary LE
2202 case 0xf9: // Block load secondary LE
2203 if (rd & 7) {
2204 raise_exception(TT_ILL_INSN);
2205 return;
2207 helper_check_align(addr, 0x3f);
2208 for (i = 0; i < 16; i++) {
2209 *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
2210 0);
2211 addr += 4;
2214 return;
2215 default:
2216 break;
2219 val = helper_ld_asi(addr, asi, size, 0);
2220 switch(size) {
2221 default:
2222 case 4:
2223 *((uint32_t *)&FT0) = val;
2224 break;
2225 case 8:
2226 *((int64_t *)&DT0) = val;
2227 break;
2228 case 16:
2229 // XXX
2230 break;
2234 void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
2236 unsigned int i;
2237 target_ulong val = 0;
2239 helper_check_align(addr, 3);
2240 switch (asi) {
2241 case 0xf0: // Block store primary
2242 case 0xf1: // Block store secondary
2243 case 0xf8: // Block store primary LE
2244 case 0xf9: // Block store secondary LE
2245 if (rd & 7) {
2246 raise_exception(TT_ILL_INSN);
2247 return;
2249 helper_check_align(addr, 0x3f);
2250 for (i = 0; i < 16; i++) {
2251 val = *(uint32_t *)&env->fpr[rd++];
2252 helper_st_asi(addr, val, asi & 0x8f, 4);
2253 addr += 4;
2256 return;
2257 default:
2258 break;
2261 switch(size) {
2262 default:
2263 case 4:
2264 val = *((uint32_t *)&FT0);
2265 break;
2266 case 8:
2267 val = *((int64_t *)&DT0);
2268 break;
2269 case 16:
2270 // XXX
2271 break;
2273 helper_st_asi(addr, val, asi, size);
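/* CASA/CASXA: compare the rs2 value with the memory word addressed by rs1;
   if they match, replace it with the rd value.  The old memory contents are
   always returned so the caller can write them back to rd.  Guest
   instructions are executed serially here, so the load/store pair needs no
   extra locking. */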
2276 target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
2277 target_ulong val2, uint32_t asi)
2279 target_ulong ret;
2281 val1 &= 0xffffffffUL;
2282 ret = helper_ld_asi(addr, asi, 4, 0);
2283 ret &= 0xffffffffUL;
2284 if (val1 == ret)
2285 helper_st_asi(addr, val2 & 0xffffffffUL, asi, 4);
2286 return ret;
2289 target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
2290 target_ulong val2, uint32_t asi)
2292 target_ulong ret;
2294 ret = helper_ld_asi(addr, asi, 8, 0);
2295 if (val1 == ret)
2296 helper_st_asi(addr, val2, asi, 8);
2297 return ret;
2299 #endif /* TARGET_SPARC64 */
2301 #ifndef TARGET_SPARC64
2302 void helper_rett(void)
2304 unsigned int cwp;
2306 if (env->psret == 1)
2307 raise_exception(TT_ILL_INSN);
2309 env->psret = 1;
2310 cwp = cpu_cwp_inc(env, env->cwp + 1) ;
2311 if (env->wim & (1 << cwp)) {
2312 raise_exception(TT_WIN_UNF);
2314 set_cwp(cwp);
2315 env->psrs = env->psrps;
2317 #endif
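/* UDIV/SDIV divide the 64-bit value formed by Y (high word) concatenated
   with the rs1 operand (low word) by the 32-bit rs2 operand.  On overflow
   the result saturates and cc_src2 records the condition so the CC-setting
   variants can raise the V flag. */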
2319 target_ulong helper_udiv(target_ulong a, target_ulong b)
2321 uint64_t x0;
2322 uint32_t x1;
2324 x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
2325 x1 = b;
2327 if (x1 == 0) {
2328 raise_exception(TT_DIV_ZERO);
2331 x0 = x0 / x1;
2332 if (x0 > 0xffffffff) {
2333 env->cc_src2 = 1;
2334 return 0xffffffff;
2335 } else {
2336 env->cc_src2 = 0;
2337 return x0;
2341 target_ulong helper_sdiv(target_ulong a, target_ulong b)
2343 int64_t x0;
2344 int32_t x1;
2346 x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
2347 x1 = b;
2349 if (x1 == 0) {
2350 raise_exception(TT_DIV_ZERO);
2353 x0 = x0 / x1;
2354 if ((int32_t) x0 != x0) {
2355 env->cc_src2 = 1;
2356 return x0 < 0? 0x80000000: 0x7fffffff;
2357 } else {
2358 env->cc_src2 = 0;
2359 return x0;
2363 uint64_t helper_pack64(target_ulong high, target_ulong low)
2365 return ((uint64_t)high << 32) | (uint64_t)(low & 0xffffffff);
2368 void helper_stdf(target_ulong addr, int mem_idx)
2370 helper_check_align(addr, 7);
2371 #if !defined(CONFIG_USER_ONLY)
2372 switch (mem_idx) {
2373 case 0:
2374 stfq_user(addr, DT0);
2375 break;
2376 case 1:
2377 stfq_kernel(addr, DT0);
2378 break;
2379 #ifdef TARGET_SPARC64
2380 case 2:
2381 stfq_hypv(addr, DT0);
2382 break;
2383 #endif
2384 default:
2385 break;
2387 #else
2388 address_mask(env, &addr);
2389 stfq_raw(addr, DT0);
2390 #endif
2393 void helper_lddf(target_ulong addr, int mem_idx)
2395 helper_check_align(addr, 7);
2396 #if !defined(CONFIG_USER_ONLY)
2397 switch (mem_idx) {
2398 case 0:
2399 DT0 = ldfq_user(addr);
2400 break;
2401 case 1:
2402 DT0 = ldfq_kernel(addr);
2403 break;
2404 #ifdef TARGET_SPARC64
2405 case 2:
2406 DT0 = ldfq_hypv(addr);
2407 break;
2408 #endif
2409 default:
2410 break;
2412 #else
2413 address_mask(env, &addr);
2414 DT0 = ldfq_raw(addr);
2415 #endif
2418 void helper_ldqf(target_ulong addr, int mem_idx)
2420 // XXX add 128 bit load
2421 CPU_QuadU u;
2423 helper_check_align(addr, 7);
2424 #if !defined(CONFIG_USER_ONLY)
2425 switch (mem_idx) {
2426 case 0:
2427 u.ll.upper = ldq_user(addr);
2428 u.ll.lower = ldq_user(addr + 8);
2429 QT0 = u.q;
2430 break;
2431 case 1:
2432 u.ll.upper = ldq_kernel(addr);
2433 u.ll.lower = ldq_kernel(addr + 8);
2434 QT0 = u.q;
2435 break;
2436 #ifdef TARGET_SPARC64
2437 case 2:
2438 u.ll.upper = ldq_hypv(addr);
2439 u.ll.lower = ldq_hypv(addr + 8);
2440 QT0 = u.q;
2441 break;
2442 #endif
2443 default:
2444 break;
2446 #else
2447 address_mask(env, &addr);
2448 u.ll.upper = ldq_raw(addr);
2449 u.ll.lower = ldq_raw((addr + 8) & 0xffffffffULL);
2450 QT0 = u.q;
2451 #endif
2454 void helper_stqf(target_ulong addr, int mem_idx)
2456 // XXX add 128 bit store
2457 CPU_QuadU u;
2459 helper_check_align(addr, 7);
2460 #if !defined(CONFIG_USER_ONLY)
2461 switch (mem_idx) {
2462 case 0:
2463 u.q = QT0;
2464 stq_user(addr, u.ll.upper);
2465 stq_user(addr + 8, u.ll.lower);
2466 break;
2467 case 1:
2468 u.q = QT0;
2469 stq_kernel(addr, u.ll.upper);
2470 stq_kernel(addr + 8, u.ll.lower);
2471 break;
2472 #ifdef TARGET_SPARC64
2473 case 2:
2474 u.q = QT0;
2475 stq_hypv(addr, u.ll.upper);
2476 stq_hypv(addr + 8, u.ll.lower);
2477 break;
2478 #endif
2479 default:
2480 break;
2482 #else
2483 u.q = QT0;
2484 address_mask(env, &addr);
2485 stq_raw(addr, u.ll.upper);
2486 stq_raw((addr + 8) & 0xffffffffULL, u.ll.lower);
2487 #endif
2490 void helper_ldfsr(void)
2492 int rnd_mode;
2494 PUT_FSR32(env, *((uint32_t *) &FT0));
2495 switch (env->fsr & FSR_RD_MASK) {
2496 case FSR_RD_NEAREST:
2497 rnd_mode = float_round_nearest_even;
2498 break;
2499 default:
2500 case FSR_RD_ZERO:
2501 rnd_mode = float_round_to_zero;
2502 break;
2503 case FSR_RD_POS:
2504 rnd_mode = float_round_up;
2505 break;
2506 case FSR_RD_NEG:
2507 rnd_mode = float_round_down;
2508 break;
2510 set_float_rounding_mode(rnd_mode, &env->fp_status);
2513 void helper_stfsr(void)
2515 *((uint32_t *) &FT0) = GET_FSR32(env);
2518 void helper_debug(void)
2520 env->exception_index = EXCP_DEBUG;
2521 cpu_loop_exit();
2524 #ifndef TARGET_SPARC64
2525 /* XXX: use another pointer for %iN registers to avoid slow wrapping
2526 handling ? */
2527 void helper_save(void)
2529 uint32_t cwp;
2531 cwp = cpu_cwp_dec(env, env->cwp - 1);
2532 if (env->wim & (1 << cwp)) {
2533 raise_exception(TT_WIN_OVF);
2535 set_cwp(cwp);
2538 void helper_restore(void)
2540 uint32_t cwp;
2542 cwp = cpu_cwp_inc(env, env->cwp + 1);
2543 if (env->wim & (1 << cwp)) {
2544 raise_exception(TT_WIN_UNF);
2546 set_cwp(cwp);
2549 void helper_wrpsr(target_ulong new_psr)
2551 if ((new_psr & PSR_CWP) >= env->nwindows)
2552 raise_exception(TT_ILL_INSN);
2553 else
2554 PUT_PSR(env, new_psr);
2557 target_ulong helper_rdpsr(void)
2559 return GET_PSR(env);
2562 #else
2563 /* XXX: use another pointer for %iN registers to avoid slow wrapping
2564 handling ? */
2565 void helper_save(void)
2567 uint32_t cwp;
2569 cwp = cpu_cwp_dec(env, env->cwp - 1);
2570 if (env->cansave == 0) {
2571 raise_exception(TT_SPILL | (env->otherwin != 0 ?
2572 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2573 ((env->wstate & 0x7) << 2)));
2574 } else {
2575 if (env->cleanwin - env->canrestore == 0) {
2576 // XXX Clean windows without trap
2577 raise_exception(TT_CLRWIN);
2578 } else {
2579 env->cansave--;
2580 env->canrestore++;
2581 set_cwp(cwp);
2586 void helper_restore(void)
2588 uint32_t cwp;
2590 cwp = cpu_cwp_inc(env, env->cwp + 1);
2591 if (env->canrestore == 0) {
2592 raise_exception(TT_FILL | (env->otherwin != 0 ?
2593 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2594 ((env->wstate & 0x7) << 2)));
2595 } else {
2596 env->cansave++;
2597 env->canrestore--;
2598 set_cwp(cwp);
void helper_flushw(void)
{
    if (env->cansave != env->nwindows - 2) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    }
}

void helper_saved(void)
{
    env->cansave++;
    if (env->otherwin == 0)
        env->canrestore--;
    else
        env->otherwin--;
}

void helper_restored(void)
{
    env->canrestore++;
    if (env->cleanwin < env->nwindows - 1)
        env->cleanwin++;
    if (env->otherwin == 0)
        env->cansave--;
    else
        env->otherwin--;
}

target_ulong helper_rdccr(void)
{
    return GET_CCR(env);
}

void helper_wrccr(target_ulong new_ccr)
{
    PUT_CCR(env, new_ccr);
}

// CWP handling is reversed in V9, but we still use the V8 register
// order.
target_ulong helper_rdcwp(void)
{
    return GET_CWP64(env);
}

void helper_wrcwp(target_ulong new_cwp)
{
    PUT_CWP64(env, new_cwp);
}

// This macro uses non-native bit order, i.e. bit 0 is bit 63
#define GET_FIELD(X, FROM, TO) \
    ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))

// This macro uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO) \
    GET_FIELD(X, 63 - (TO), 63 - (FROM))
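
/* Example: GET_FIELD_SP(x, 11, 12) expands to GET_FIELD(x, 51, 52),
   which is (x >> 11) & 3 -- bits 12..11 of x in the manual's numbering. */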
// VIS ARRAY8: convert the packed (x, y, z) fixed-point coordinates in rs1
// into a blocked-byte address; cubesize (rs2) selects the block dimension.
target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
{
    return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
        (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
        (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
        (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
        (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
        (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
        (((pixel_addr >> 55) & 1) << 4) |
        (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
        GET_FIELD_SP(pixel_addr, 11, 12);
}
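
/* ALIGNADDRESS (VIS): record the low three bits of addr + offset in the
   GSR align field for later use by FALIGNDATA, and return the
   doubleword-aligned sum. */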
target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
{
    uint64_t tmp;

    tmp = addr + offset;
    env->gsr &= ~7ULL;
    env->gsr |= tmp & 7ULL;
    return tmp & ~7ULL;
}

target_ulong helper_popc(target_ulong val)
{
    return ctpop64(val);
}
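
/* Pre-GL UltraSPARC CPUs bank the eight globals: PSTATE.AG selects the
   alternate globals, PSTATE.MG the MMU globals and PSTATE.IG the interrupt
   globals (together the 0xc01 mask used below); with all three clear the
   normal globals are active. */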
static inline uint64_t *get_gregset(uint64_t pstate)
{
    switch (pstate) {
    default:
    case 0:
        return env->bgregs;
    case PS_AG:
        return env->agregs;
    case PS_MG:
        return env->mgregs;
    case PS_IG:
        return env->igregs;
    }
}

static inline void change_pstate(uint64_t new_pstate)
{
    uint64_t pstate_regs, new_pstate_regs;
    uint64_t *src, *dst;

    pstate_regs = env->pstate & 0xc01;
    new_pstate_regs = new_pstate & 0xc01;
    if (new_pstate_regs != pstate_regs) {
        // Switch global register bank
        src = get_gregset(new_pstate_regs);
        dst = get_gregset(pstate_regs);
        memcpy32(dst, env->gregs);
        memcpy32(env->gregs, src);
    }
    env->pstate = new_pstate;
}

void helper_wrpstate(target_ulong new_state)
{
    if (!(env->def->features & CPU_FEATURE_GL))
        change_pstate(new_state & 0xf3f);
}
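
/* DONE and RETRY both pop the current trap level after restoring CCR, ASI,
   PSTATE and CWP from TSTATE; they differ only in the return point: DONE
   resumes at TNPC (after the trapped instruction), RETRY re-executes the
   trapped instruction at TPC. */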
void helper_done(void)
{
    // DONE: PC <- TNPC, NPC <- TNPC + 4
    env->pc = env->tsptr->tnpc;
    env->npc = env->tsptr->tnpc + 4;
    PUT_CCR(env, env->tsptr->tstate >> 32);
    env->asi = (env->tsptr->tstate >> 24) & 0xff;
    change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
    PUT_CWP64(env, env->tsptr->tstate & 0xff);
    env->tl--;
    env->tsptr = &env->ts[env->tl & MAXTL_MASK];
}

void helper_retry(void)
{
    // RETRY: PC <- TPC, NPC <- TNPC
    env->pc = env->tsptr->tpc;
    env->npc = env->tsptr->tnpc;
    PUT_CCR(env, env->tsptr->tstate >> 32);
    env->asi = (env->tsptr->tstate >> 24) & 0xff;
    change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
    PUT_CWP64(env, env->tsptr->tstate & 0xff);
    env->tl--;
    env->tsptr = &env->ts[env->tl & MAXTL_MASK];
}

#endif

void helper_flush(target_ulong addr)
{
    addr &= ~7;
    tb_invalidate_page_range(addr, addr + 8);
}

#ifdef TARGET_SPARC64
#ifdef DEBUG_PCALL
static const char * const excp_names[0x80] = {
    [TT_TFAULT] = "Instruction Access Fault",
    [TT_TMISS] = "Instruction Access MMU Miss",
    [TT_CODE_ACCESS] = "Instruction Access Error",
    [TT_ILL_INSN] = "Illegal Instruction",
    [TT_PRIV_INSN] = "Privileged Instruction",
    [TT_NFPU_INSN] = "FPU Disabled",
    [TT_FP_EXCP] = "FPU Exception",
    [TT_TOVF] = "Tag Overflow",
    [TT_CLRWIN] = "Clean Windows",
    [TT_DIV_ZERO] = "Division By Zero",
    [TT_DFAULT] = "Data Access Fault",
    [TT_DMISS] = "Data Access MMU Miss",
    [TT_DATA_ACCESS] = "Data Access Error",
    [TT_DPROT] = "Data Protection Error",
    [TT_UNALIGNED] = "Unaligned Memory Access",
    [TT_PRIV_ACT] = "Privileged Action",
    [TT_EXTINT | 0x1] = "External Interrupt 1",
    [TT_EXTINT | 0x2] = "External Interrupt 2",
    [TT_EXTINT | 0x3] = "External Interrupt 3",
    [TT_EXTINT | 0x4] = "External Interrupt 4",
    [TT_EXTINT | 0x5] = "External Interrupt 5",
    [TT_EXTINT | 0x6] = "External Interrupt 6",
    [TT_EXTINT | 0x7] = "External Interrupt 7",
    [TT_EXTINT | 0x8] = "External Interrupt 8",
    [TT_EXTINT | 0x9] = "External Interrupt 9",
    [TT_EXTINT | 0xa] = "External Interrupt 10",
    [TT_EXTINT | 0xb] = "External Interrupt 11",
    [TT_EXTINT | 0xc] = "External Interrupt 12",
    [TT_EXTINT | 0xd] = "External Interrupt 13",
    [TT_EXTINT | 0xe] = "External Interrupt 14",
    [TT_EXTINT | 0xf] = "External Interrupt 15",
};
#endif
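
/* V9 trap entry: TL is bumped (entering RED state once MAXTL is reached),
   the interrupted CCR/ASI/PSTATE/CWP, PC and nPC are saved into the new
   level's TSTATE/TPC/TNPC, the globals are switched to the interrupt, MMU
   or alternate set according to the trap type, spill/fill traps rotate CWP,
   and execution vectors through TBR, with bit 14 selecting the second half
   of the trap table when TL > 1. */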
void do_interrupt(CPUState *env)
{
    int intno = env->exception_index;

#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_INT) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x180)
            name = "Unknown";
        else if (intno >= 0x100)
            name = "Trap Instruction";
        else if (intno >= 0xc0)
            name = "Window Fill";
        else if (intno >= 0x80)
            name = "Window Spill";
        else {
            name = excp_names[intno];
            if (!name)
                name = "Unknown";
        }

        fprintf(logfile, "%6d: %s (v=%04x) pc=%016" PRIx64 " npc=%016" PRIx64
                " SP=%016" PRIx64 "\n",
                count, name, intno,
                env->pc,
                env->npc, env->regwptr[6]);
        cpu_dump_state(env, logfile, fprintf, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            fprintf(logfile, " code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                fprintf(logfile, " %02x", ldub(ptr + i));
            }
            fprintf(logfile, "\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    if (env->tl >= env->maxtl) {
        cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
                  " Error state", env->exception_index, env->tl, env->maxtl);
        return;
    }
#endif
    if (env->tl < env->maxtl - 1) {
        env->tl++;
    } else {
        env->pstate |= PS_RED;
        if (env->tl < env->maxtl)
            env->tl++;
    }
    env->tsptr = &env->ts[env->tl & MAXTL_MASK];
    env->tsptr->tstate = ((uint64_t)GET_CCR(env) << 32) |
        ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
        GET_CWP64(env);
    env->tsptr->tpc = env->pc;
    env->tsptr->tnpc = env->npc;
    env->tsptr->tt = intno;
    if (!(env->def->features & CPU_FEATURE_GL)) {
        switch (intno) {
        case TT_IVEC:
            change_pstate(PS_PEF | PS_PRIV | PS_IG);
            break;
        case TT_TFAULT:
        case TT_TMISS:
        case TT_DFAULT:
        case TT_DMISS:
        case TT_DPROT:
            change_pstate(PS_PEF | PS_PRIV | PS_MG);
            break;
        default:
            change_pstate(PS_PEF | PS_PRIV | PS_AG);
            break;
        }
    }
    if (intno == TT_CLRWIN)
        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1));
    else if ((intno & 0x1c0) == TT_SPILL)
        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2));
    else if ((intno & 0x1c0) == TT_FILL)
        cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1));
    env->tbr &= ~0x7fffULL;
    env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = 0;
}
#else
#ifdef DEBUG_PCALL
static const char * const excp_names[0x80] = {
    [TT_TFAULT] = "Instruction Access Fault",
    [TT_ILL_INSN] = "Illegal Instruction",
    [TT_PRIV_INSN] = "Privileged Instruction",
    [TT_NFPU_INSN] = "FPU Disabled",
    [TT_WIN_OVF] = "Window Overflow",
    [TT_WIN_UNF] = "Window Underflow",
    [TT_UNALIGNED] = "Unaligned Memory Access",
    [TT_FP_EXCP] = "FPU Exception",
    [TT_DFAULT] = "Data Access Fault",
    [TT_TOVF] = "Tag Overflow",
    [TT_EXTINT | 0x1] = "External Interrupt 1",
    [TT_EXTINT | 0x2] = "External Interrupt 2",
    [TT_EXTINT | 0x3] = "External Interrupt 3",
    [TT_EXTINT | 0x4] = "External Interrupt 4",
    [TT_EXTINT | 0x5] = "External Interrupt 5",
    [TT_EXTINT | 0x6] = "External Interrupt 6",
    [TT_EXTINT | 0x7] = "External Interrupt 7",
    [TT_EXTINT | 0x8] = "External Interrupt 8",
    [TT_EXTINT | 0x9] = "External Interrupt 9",
    [TT_EXTINT | 0xa] = "External Interrupt 10",
    [TT_EXTINT | 0xb] = "External Interrupt 11",
    [TT_EXTINT | 0xc] = "External Interrupt 12",
    [TT_EXTINT | 0xd] = "External Interrupt 13",
    [TT_EXTINT | 0xe] = "External Interrupt 14",
    [TT_EXTINT | 0xf] = "External Interrupt 15",
    [TT_CODE_ACCESS] = "Instruction Access Error",
    [TT_DATA_ACCESS] = "Data Access Error",
    [TT_DIV_ZERO] = "Division By Zero",
    [TT_NCP_INSN] = "Coprocessor Disabled",
};
#endif
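
/* V8 trap entry: traps are taken with ET cleared (psret = 0), CWP is
   decremented into a fresh window, the interrupted PC and nPC are saved in
   that window's %l1 and %l2 (regwptr[9]/[10]), PS is copied from S before
   entering supervisor mode, and execution vectors through TBR with the trap
   type in bits 11:4. */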
void do_interrupt(CPUState *env)
{
    int cwp, intno = env->exception_index;

#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_INT) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x100)
            name = "Unknown";
        else if (intno >= 0x80)
            name = "Trap Instruction";
        else {
            name = excp_names[intno];
            if (!name)
                name = "Unknown";
        }

        fprintf(logfile, "%6d: %s (v=%02x) pc=%08x npc=%08x SP=%08x\n",
                count, name, intno,
                env->pc,
                env->npc, env->regwptr[6]);
        cpu_dump_state(env, logfile, fprintf, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            fprintf(logfile, " code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                fprintf(logfile, " %02x", ldub(ptr + i));
            }
            fprintf(logfile, "\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    if (env->psret == 0) {
        cpu_abort(env, "Trap 0x%02x while interrupts disabled, Error state",
                  env->exception_index);
        return;
    }
#endif
    env->psret = 0;
    cwp = cpu_cwp_dec(env, env->cwp - 1);
    cpu_set_cwp(env, cwp);
    env->regwptr[9] = env->pc;
    env->regwptr[10] = env->npc;
    env->psrps = env->psrs;
    env->psrs = 1;
    env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = 0;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr);

#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
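
/* Each inclusion of softmmu_template.h instantiates the slow-path load/store
   helpers for accesses of 1 << SHIFT bytes (1, 2, 4 and 8); with ALIGNED_ONLY
   defined, the generated helpers call do_unaligned_access() for misaligned
   addresses instead of handling them themselves. */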
/* XXX: make it generic ? */
static void cpu_restore_state2(void *retaddr)
{
    TranslationBlock *tb;
    unsigned long pc;

    if (retaddr) {
        /* now we have a real cpu fault */
        pc = (unsigned long)retaddr;
        tb = tb_find_pc(pc);
        if (tb) {
            /* the PC is inside the translated code. It means that we have
               a virtual CPU fault */
            cpu_restore_state(tb, env, pc, (void *)(long)env->cond);
        }
    }
}

static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr)
{
#ifdef DEBUG_UNALIGNED
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
           "\n", addr, env->pc);
#endif
    cpu_restore_state2(retaddr);
    raise_exception(TT_UNALIGNED);
}

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    int ret;
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        cpu_restore_state2(retaddr);
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif

#ifndef TARGET_SPARC64
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi)
{
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
#ifdef DEBUG_UNASSIGNED
    if (is_asi)
        printf("Unassigned mem %s access to " TARGET_FMT_plx
               " asi 0x%02x from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", addr, is_asi,
               env->pc);
    else
        printf("Unassigned mem %s access to " TARGET_FMT_plx " from "
               TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", addr, env->pc);
#endif
    if (env->mmuregs[3]) /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    if (is_asi)
        env->mmuregs[3] |= 1 << 16;
    if (env->psrs)
        env->mmuregs[3] |= 1 << 5;
    if (is_exec)
        env->mmuregs[3] |= 1 << 6;
    if (is_write)
        env->mmuregs[3] |= 1 << 7;
    env->mmuregs[3] |= (5 << 2) | 2;
    env->mmuregs[4] = addr; /* Fault address register */
    if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
        if (is_exec)
            raise_exception(TT_CODE_ACCESS);
        else
            raise_exception(TT_DATA_ACCESS);
    }
    env = saved_env;
}
#else
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi)
{
#ifdef DEBUG_UNASSIGNED
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
           "\n", addr, env->pc);
    env = saved_env;
#endif
    if (is_exec)
        raise_exception(TT_CODE_ACCESS);
    else
        raise_exception(TT_DATA_ACCESS);
}
#endif