Convert basic float32 ops to TCG
[qemu/mini2440.git] / target-sparc / op_helper.c
blob 84ed6f6c4632ac07827d3e3931273949ca4ad4d2
1 #include "exec.h"
2 #include "host-utils.h"
3 #include "helper.h"
4 #if !defined(CONFIG_USER_ONLY)
5 #include "softmmu_exec.h"
6 #endif /* !defined(CONFIG_USER_ONLY) */
8 //#define DEBUG_MMU
9 //#define DEBUG_MXCC
10 //#define DEBUG_UNALIGNED
11 //#define DEBUG_UNASSIGNED
12 //#define DEBUG_ASI
14 #ifdef DEBUG_MMU
15 #define DPRINTF_MMU(fmt, args...) \
16 do { printf("MMU: " fmt , ##args); } while (0)
17 #else
18 #define DPRINTF_MMU(fmt, args...) do {} while (0)
19 #endif
21 #ifdef DEBUG_MXCC
22 #define DPRINTF_MXCC(fmt, args...) \
23 do { printf("MXCC: " fmt , ##args); } while (0)
24 #else
25 #define DPRINTF_MXCC(fmt, args...) do {} while (0)
26 #endif
28 #ifdef DEBUG_ASI
29 #define DPRINTF_ASI(fmt, args...) \
30 do { printf("ASI: " fmt , ##args); } while (0)
31 #else
32 #define DPRINTF_ASI(fmt, args...) do {} while (0)
33 #endif
35 #ifdef TARGET_SPARC64
36 #ifndef TARGET_ABI32
37 #define AM_CHECK(env1) ((env1)->pstate & PS_AM)
38 #else
39 #define AM_CHECK(env1) (1)
40 #endif
41 #endif
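/* AM_CHECK: on SPARC64, PSTATE.AM enables 32-bit address masking; under the
   32-bit ABI it is always in effect.  address_mask() below truncates virtual
   addresses accordingly before they are used for memory accesses. */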
43 static inline void address_mask(CPUState *env1, target_ulong *addr)
45 #ifdef TARGET_SPARC64
46 if (AM_CHECK(env1))
47 *addr &= 0xffffffffULL;
48 #endif
51 void raise_exception(int tt)
53 env->exception_index = tt;
54 cpu_loop_exit();
57 void helper_trap(target_ulong nb_trap)
59 env->exception_index = TT_TRAP + (nb_trap & 0x7f);
60 cpu_loop_exit();
63 void helper_trapcc(target_ulong nb_trap, target_ulong do_trap)
65 if (do_trap) {
66 env->exception_index = TT_TRAP + (nb_trap & 0x7f);
67 cpu_loop_exit();
71 static inline void set_cwp(int new_cwp)
73 cpu_set_cwp(env, new_cwp);
76 void helper_check_align(target_ulong addr, uint32_t align)
78 if (addr & align) {
79 #ifdef DEBUG_UNALIGNED
80 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
81 "\n", addr, env->pc);
82 #endif
83 raise_exception(TT_UNALIGNED);
87 #define F_HELPER(name, p) void helper_f##name##p(void)
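/* The single-precision arithmetic helpers now take and return float32 values
   directly (TCG calling convention); the double and quad variants still work
   on the DT0/DT1 and QT0/QT1 global temporaries. */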
89 #define F_BINOP(name) \
90 float32 helper_f ## name ## s (float32 src1, float32 src2) \
91 { \
92 return float32_ ## name (src1, src2, &env->fp_status); \
93 } \
94 F_HELPER(name, d) \
95 { \
96 DT0 = float64_ ## name (DT0, DT1, &env->fp_status); \
97 } \
98 F_HELPER(name, q) \
99 { \
100 QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \
103 F_BINOP(add);
104 F_BINOP(sub);
105 F_BINOP(mul);
106 F_BINOP(div);
107 #undef F_BINOP
109 void helper_fsmuld(void)
111 DT0 = float64_mul(float32_to_float64(FT0, &env->fp_status),
112 float32_to_float64(FT1, &env->fp_status),
113 &env->fp_status);
116 void helper_fdmulq(void)
118 QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
119 float64_to_float128(DT1, &env->fp_status),
120 &env->fp_status);
123 float32 helper_fnegs(float32 src)
125 return float32_chs(src);
128 #ifdef TARGET_SPARC64
129 F_HELPER(neg, d)
131 DT0 = float64_chs(DT1);
134 F_HELPER(neg, q)
136 QT0 = float128_chs(QT1);
138 #endif
140 /* Integer to float conversion. */
141 float32 helper_fitos(int32_t src)
143 return int32_to_float32(src, &env->fp_status);
146 F_HELPER(ito, d)
148 DT0 = int32_to_float64(*((int32_t *)&FT1), &env->fp_status);
151 F_HELPER(ito, q)
153 QT0 = int32_to_float128(*((int32_t *)&FT1), &env->fp_status);
156 #ifdef TARGET_SPARC64
157 F_HELPER(xto, s)
159 FT0 = int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
162 F_HELPER(xto, d)
164 DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
167 F_HELPER(xto, q)
169 QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
171 #endif
172 #undef F_HELPER
174 /* floating point conversion */
175 void helper_fdtos(void)
177 FT0 = float64_to_float32(DT1, &env->fp_status);
180 void helper_fstod(void)
182 DT0 = float32_to_float64(FT1, &env->fp_status);
185 void helper_fqtos(void)
187 FT0 = float128_to_float32(QT1, &env->fp_status);
190 void helper_fstoq(void)
192 QT0 = float32_to_float128(FT1, &env->fp_status);
195 void helper_fqtod(void)
197 DT0 = float128_to_float64(QT1, &env->fp_status);
200 void helper_fdtoq(void)
202 QT0 = float64_to_float128(DT1, &env->fp_status);
205 /* Float to integer conversion. */
206 int32_t helper_fstoi(float32 src)
208 return float32_to_int32_round_to_zero(src, &env->fp_status);
211 void helper_fdtoi(void)
213 *((int32_t *)&FT0) = float64_to_int32_round_to_zero(DT1, &env->fp_status);
216 void helper_fqtoi(void)
218 *((int32_t *)&FT0) = float128_to_int32_round_to_zero(QT1, &env->fp_status);
221 #ifdef TARGET_SPARC64
222 void helper_fstox(void)
224 *((int64_t *)&DT0) = float32_to_int64_round_to_zero(FT1, &env->fp_status);
227 void helper_fdtox(void)
229 *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
232 void helper_fqtox(void)
234 *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
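/* FALIGNDATA concatenates DT0:DT1 and extracts eight bytes starting at the
   byte offset held in GSR.align (the low three bits of %gsr), as previously
   set up by ALIGNADDRESS. */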
237 void helper_faligndata(void)
239 uint64_t tmp;
241 tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
242 /* on many architectures a shift of 64 does nothing */
243 if ((env->gsr & 7) != 0) {
244 tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
246 *((uint64_t *)&DT0) = tmp;
249 void helper_movl_FT0_0(void)
251 *((uint32_t *)&FT0) = 0;
254 void helper_movl_DT0_0(void)
256 *((uint64_t *)&DT0) = 0;
259 void helper_movl_FT0_1(void)
261 *((uint32_t *)&FT0) = 0xffffffff;
264 void helper_movl_DT0_1(void)
266 *((uint64_t *)&DT0) = 0xffffffffffffffffULL;
269 void helper_fnot(void)
271 *(uint64_t *)&DT0 = ~*(uint64_t *)&DT1;
274 void helper_fnots(void)
276 *(uint32_t *)&FT0 = ~*(uint32_t *)&FT1;
279 void helper_fnor(void)
281 *(uint64_t *)&DT0 = ~(*(uint64_t *)&DT0 | *(uint64_t *)&DT1);
284 void helper_fnors(void)
286 *(uint32_t *)&FT0 = ~(*(uint32_t *)&FT0 | *(uint32_t *)&FT1);
289 void helper_for(void)
291 *(uint64_t *)&DT0 |= *(uint64_t *)&DT1;
294 void helper_fors(void)
296 *(uint32_t *)&FT0 |= *(uint32_t *)&FT1;
299 void helper_fxor(void)
301 *(uint64_t *)&DT0 ^= *(uint64_t *)&DT1;
304 void helper_fxors(void)
306 *(uint32_t *)&FT0 ^= *(uint32_t *)&FT1;
309 void helper_fand(void)
311 *(uint64_t *)&DT0 &= *(uint64_t *)&DT1;
314 void helper_fands(void)
316 *(uint32_t *)&FT0 &= *(uint32_t *)&FT1;
319 void helper_fornot(void)
321 *(uint64_t *)&DT0 = *(uint64_t *)&DT0 | ~*(uint64_t *)&DT1;
324 void helper_fornots(void)
326 *(uint32_t *)&FT0 = *(uint32_t *)&FT0 | ~*(uint32_t *)&FT1;
329 void helper_fandnot(void)
331 *(uint64_t *)&DT0 = *(uint64_t *)&DT0 & ~*(uint64_t *)&DT1;
334 void helper_fandnots(void)
336 *(uint32_t *)&FT0 = *(uint32_t *)&FT0 & ~*(uint32_t *)&FT1;
339 void helper_fnand(void)
341 *(uint64_t *)&DT0 = ~(*(uint64_t *)&DT0 & *(uint64_t *)&DT1);
344 void helper_fnands(void)
346 *(uint32_t *)&FT0 = ~(*(uint32_t *)&FT0 & *(uint32_t *)&FT1);
349 void helper_fxnor(void)
351 *(uint64_t *)&DT0 ^= ~*(uint64_t *)&DT1;
354 void helper_fxnors(void)
356 *(uint32_t *)&FT0 ^= ~*(uint32_t *)&FT1;
359 #ifdef WORDS_BIGENDIAN
360 #define VIS_B64(n) b[7 - (n)]
361 #define VIS_W64(n) w[3 - (n)]
362 #define VIS_SW64(n) sw[3 - (n)]
363 #define VIS_L64(n) l[1 - (n)]
364 #define VIS_B32(n) b[3 - (n)]
365 #define VIS_W32(n) w[1 - (n)]
366 #else
367 #define VIS_B64(n) b[n]
368 #define VIS_W64(n) w[n]
369 #define VIS_SW64(n) sw[n]
370 #define VIS_L64(n) l[n]
371 #define VIS_B32(n) b[n]
372 #define VIS_W32(n) w[n]
373 #endif
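/* The VIS_* accessors give a host-independent element numbering: element 0
   is always the least-significant byte/halfword/word of the value, whether
   the host is big- or little-endian. */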
375 typedef union {
376 uint8_t b[8];
377 uint16_t w[4];
378 int16_t sw[4];
379 uint32_t l[2];
380 float64 d;
381 } vis64;
383 typedef union {
384 uint8_t b[4];
385 uint16_t w[2];
386 uint32_t l;
387 float32 f;
388 } vis32;
390 void helper_fpmerge(void)
392 vis64 s, d;
394 s.d = DT0;
395 d.d = DT1;
397 // Reverse calculation order to handle overlap
398 d.VIS_B64(7) = s.VIS_B64(3);
399 d.VIS_B64(6) = d.VIS_B64(3);
400 d.VIS_B64(5) = s.VIS_B64(2);
401 d.VIS_B64(4) = d.VIS_B64(2);
402 d.VIS_B64(3) = s.VIS_B64(1);
403 d.VIS_B64(2) = d.VIS_B64(1);
404 d.VIS_B64(1) = s.VIS_B64(0);
405 //d.VIS_B64(0) = d.VIS_B64(0);
407 DT0 = d.d;
410 void helper_fmul8x16(void)
412 vis64 s, d;
413 uint32_t tmp;
415 s.d = DT0;
416 d.d = DT1;
418 #define PMUL(r) \
419 tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \
420 if ((tmp & 0xff) > 0x7f) \
421 tmp += 0x100; \
422 d.VIS_W64(r) = tmp >> 8;
424 PMUL(0);
425 PMUL(1);
426 PMUL(2);
427 PMUL(3);
428 #undef PMUL
430 DT0 = d.d;
433 void helper_fmul8x16al(void)
435 vis64 s, d;
436 uint32_t tmp;
438 s.d = DT0;
439 d.d = DT1;
441 #define PMUL(r) \
442 tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \
443 if ((tmp & 0xff) > 0x7f) \
444 tmp += 0x100; \
445 d.VIS_W64(r) = tmp >> 8;
447 PMUL(0);
448 PMUL(1);
449 PMUL(2);
450 PMUL(3);
451 #undef PMUL
453 DT0 = d.d;
456 void helper_fmul8x16au(void)
458 vis64 s, d;
459 uint32_t tmp;
461 s.d = DT0;
462 d.d = DT1;
464 #define PMUL(r) \
465 tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \
466 if ((tmp & 0xff) > 0x7f) \
467 tmp += 0x100; \
468 d.VIS_W64(r) = tmp >> 8;
470 PMUL(0);
471 PMUL(1);
472 PMUL(2);
473 PMUL(3);
474 #undef PMUL
476 DT0 = d.d;
479 void helper_fmul8sux16(void)
481 vis64 s, d;
482 uint32_t tmp;
484 s.d = DT0;
485 d.d = DT1;
487 #define PMUL(r) \
488 tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
489 if ((tmp & 0xff) > 0x7f) \
490 tmp += 0x100; \
491 d.VIS_W64(r) = tmp >> 8;
493 PMUL(0);
494 PMUL(1);
495 PMUL(2);
496 PMUL(3);
497 #undef PMUL
499 DT0 = d.d;
502 void helper_fmul8ulx16(void)
504 vis64 s, d;
505 uint32_t tmp;
507 s.d = DT0;
508 d.d = DT1;
510 #define PMUL(r) \
511 tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
512 if ((tmp & 0xff) > 0x7f) \
513 tmp += 0x100; \
514 d.VIS_W64(r) = tmp >> 8;
516 PMUL(0);
517 PMUL(1);
518 PMUL(2);
519 PMUL(3);
520 #undef PMUL
522 DT0 = d.d;
525 void helper_fmuld8sux16(void)
527 vis64 s, d;
528 uint32_t tmp;
530 s.d = DT0;
531 d.d = DT1;
533 #define PMUL(r) \
534 tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
535 if ((tmp & 0xff) > 0x7f) \
536 tmp += 0x100; \
537 d.VIS_L64(r) = tmp;
539 // Reverse calculation order to handle overlap
540 PMUL(1);
541 PMUL(0);
542 #undef PMUL
544 DT0 = d.d;
547 void helper_fmuld8ulx16(void)
549 vis64 s, d;
550 uint32_t tmp;
552 s.d = DT0;
553 d.d = DT1;
555 #define PMUL(r) \
556 tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
557 if ((tmp & 0xff) > 0x7f) \
558 tmp += 0x100; \
559 d.VIS_L64(r) = tmp;
561 // Reverse calculation order to handle overlap
562 PMUL(1);
563 PMUL(0);
564 #undef PMUL
566 DT0 = d.d;
569 void helper_fexpand(void)
571 vis32 s;
572 vis64 d;
574 s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
575 d.d = DT1;
576 d.VIS_W64(0) = s.VIS_B32(0) << 4; /* expand four unsigned bytes into */
577 d.VIS_W64(1) = s.VIS_B32(1) << 4; /* four 16-bit fixed values; the   */
578 d.VIS_W64(2) = s.VIS_B32(2) << 4; /* old VIS_L64/VIS_W32 indices 2-3 */
579 d.VIS_W64(3) = s.VIS_B32(3) << 4; /* overran the l[2]/w[2] arrays    */
581 DT0 = d.d;
584 #define VIS_HELPER(name, F) \
585 void name##16(void) \
587 vis64 s, d; \
589 s.d = DT0; \
590 d.d = DT1; \
592 d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
593 d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
594 d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
595 d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
597 DT0 = d.d; \
600 void name##16s(void) \
602 vis32 s, d; \
604 s.f = FT0; \
605 d.f = FT1; \
607 d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
608 d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
610 FT0 = d.f; \
613 void name##32(void) \
615 vis64 s, d; \
617 s.d = DT0; \
618 d.d = DT1; \
620 d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
621 d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
623 DT0 = d.d; \
626 void name##32s(void) \
628 vis32 s, d; \
630 s.f = FT0; \
631 d.f = FT1; \
633 d.l = F(d.l, s.l); \
635 FT0 = d.f; \
638 #define FADD(a, b) ((a) + (b))
639 #define FSUB(a, b) ((a) - (b))
640 VIS_HELPER(helper_fpadd, FADD)
641 VIS_HELPER(helper_fpsub, FSUB)
643 #define VIS_CMPHELPER(name, F) \
644 void name##16(void) \
646 vis64 s, d; \
648 s.d = DT0; \
649 d.d = DT1; \
651 d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0; \
652 d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0; \
653 d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0; \
654 d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0; \
656 DT0 = d.d; \
659 void name##32(void) \
661 vis64 s, d; \
663 s.d = DT0; \
664 d.d = DT1; \
666 d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0; \
667 d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0; \
669 DT0 = d.d; \
672 #define FCMPGT(a, b) ((a) > (b))
673 #define FCMPEQ(a, b) ((a) == (b))
674 #define FCMPLE(a, b) ((a) <= (b))
675 #define FCMPNE(a, b) ((a) != (b))
677 VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
678 VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
679 VIS_CMPHELPER(helper_fcmple, FCMPLE)
680 VIS_CMPHELPER(helper_fcmpne, FCMPNE)
681 #endif
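/* helper_check_ieee_exceptions: copy the exception flags accumulated by
   softfloat into the FSR current-exception (cexc) field.  If any raised
   exception is unmasked in FSR.TEM this traps with an IEEE-754 FTT code;
   otherwise the bits are folded into FSR.aexc (cexc << 5). */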
683 void helper_check_ieee_exceptions(void)
685 target_ulong status;
687 status = get_float_exception_flags(&env->fp_status);
688 if (status) {
689 /* Copy IEEE 754 flags into FSR */
690 if (status & float_flag_invalid)
691 env->fsr |= FSR_NVC;
692 if (status & float_flag_overflow)
693 env->fsr |= FSR_OFC;
694 if (status & float_flag_underflow)
695 env->fsr |= FSR_UFC;
696 if (status & float_flag_divbyzero)
697 env->fsr |= FSR_DZC;
698 if (status & float_flag_inexact)
699 env->fsr |= FSR_NXC;
701 if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
702 /* Unmasked exception, generate a trap */
703 env->fsr |= FSR_FTT_IEEE_EXCP;
704 raise_exception(TT_FP_EXCP);
705 } else {
706 /* Accumulate exceptions */
707 env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
712 void helper_clear_float_exceptions(void)
714 set_float_exception_flags(0, &env->fp_status);
717 float32 helper_fabss(float32 src)
719 return float32_abs(src);
722 #ifdef TARGET_SPARC64
723 void helper_fabsd(void)
725 DT0 = float64_abs(DT1);
728 void helper_fabsq(void)
730 QT0 = float128_abs(QT1);
732 #endif
734 float32 helper_fsqrts(float32 src)
736 return float32_sqrt(src, &env->fp_status);
739 void helper_fsqrtd(void)
741 DT0 = float64_sqrt(DT1, &env->fp_status);
744 void helper_fsqrtq(void)
746 QT0 = float128_sqrt(QT1, &env->fp_status);
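/* GEN_FCMP/GEN_FCMPS expand into the FCMP/FCMPE helpers: the comparison
   result is written to the condition-code field selected by FS (fcc0..fcc3).
   The TRAP variants (fcmpe*) take an IEEE exception trap on unordered (NaN)
   operands, as do the plain forms when NV is unmasked via FSR.TEM; otherwise
   the invalid condition is only recorded in the accumulated-exception bits. */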
749 #define GEN_FCMP(name, size, reg1, reg2, FS, TRAP) \
750 void glue(helper_, name) (void) \
752 target_ulong new_fsr; \
754 env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
755 switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) { \
756 case float_relation_unordered: \
757 new_fsr = (FSR_FCC1 | FSR_FCC0) << FS; \
758 if ((env->fsr & FSR_NVM) || TRAP) { \
759 env->fsr |= new_fsr; \
760 env->fsr |= FSR_NVC; \
761 env->fsr |= FSR_FTT_IEEE_EXCP; \
762 raise_exception(TT_FP_EXCP); \
763 } else { \
764 env->fsr |= FSR_NVA; \
766 break; \
767 case float_relation_less: \
768 new_fsr = FSR_FCC0 << FS; \
769 break; \
770 case float_relation_greater: \
771 new_fsr = FSR_FCC1 << FS; \
772 break; \
773 default: \
774 new_fsr = 0; \
775 break; \
777 env->fsr |= new_fsr; \
779 #define GEN_FCMPS(name, size, FS, TRAP) \
780 void glue(helper_, name)(float32 src1, float32 src2) \
782 target_ulong new_fsr; \
784 env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
785 switch (glue(size, _compare) (src1, src2, &env->fp_status)) { \
786 case float_relation_unordered: \
787 new_fsr = (FSR_FCC1 | FSR_FCC0) << FS; \
788 if ((env->fsr & FSR_NVM) || TRAP) { \
789 env->fsr |= new_fsr; \
790 env->fsr |= FSR_NVC; \
791 env->fsr |= FSR_FTT_IEEE_EXCP; \
792 raise_exception(TT_FP_EXCP); \
793 } else { \
794 env->fsr |= FSR_NVA; \
796 break; \
797 case float_relation_less: \
798 new_fsr = FSR_FCC0 << FS; \
799 break; \
800 case float_relation_greater: \
801 new_fsr = FSR_FCC1 << FS; \
802 break; \
803 default: \
804 new_fsr = 0; \
805 break; \
807 env->fsr |= new_fsr; \
810 GEN_FCMPS(fcmps, float32, 0, 0);
811 GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);
813 GEN_FCMPS(fcmpes, float32, 0, 1);
814 GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);
816 GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
817 GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
819 #ifdef TARGET_SPARC64
820 GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
821 GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
822 GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
824 GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
825 GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
826 GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
828 GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
829 GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
830 GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
832 GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
833 GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
834 GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
836 GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
837 GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
838 GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
840 GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
841 GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
842 GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
843 #endif
844 #undef GEN_FCMPS
846 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
847 defined(DEBUG_MXCC)
848 static void dump_mxcc(CPUState *env)
850 printf("mxccdata: %016llx %016llx %016llx %016llx\n",
851 env->mxccdata[0], env->mxccdata[1],
852 env->mxccdata[2], env->mxccdata[3]);
853 printf("mxccregs: %016llx %016llx %016llx %016llx\n"
854 " %016llx %016llx %016llx %016llx\n",
855 env->mxccregs[0], env->mxccregs[1],
856 env->mxccregs[2], env->mxccregs[3],
857 env->mxccregs[4], env->mxccregs[5],
858 env->mxccregs[6], env->mxccregs[7]);
860 #endif
862 #if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
863 && defined(DEBUG_ASI)
864 static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
865 uint64_t r1)
867 switch (size)
869 case 1:
870 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
871 addr, asi, r1 & 0xff);
872 break;
873 case 2:
874 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
875 addr, asi, r1 & 0xffff);
876 break;
877 case 4:
878 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
879 addr, asi, r1 & 0xffffffff);
880 break;
881 case 8:
882 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
883 addr, asi, r1);
884 break;
887 #endif
889 #ifndef TARGET_SPARC64
890 #ifndef CONFIG_USER_ONLY
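/* SPARC32 (SuperSPARC/TurboSPARC-style) alternate-space loads: the ASI
   selects MXCC registers, MMU probe and MMU registers, user/supervisor
   data space, cache diagnostics, or physical (MMU bypass) accesses. */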
891 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
893 uint64_t ret = 0;
894 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
895 uint32_t last_addr = addr;
896 #endif
898 helper_check_align(addr, size - 1);
899 switch (asi) {
900 case 2: /* SuperSparc MXCC registers */
901 switch (addr) {
902 case 0x01c00a00: /* MXCC control register */
903 if (size == 8)
904 ret = env->mxccregs[3];
905 else
906 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
907 size);
908 break;
909 case 0x01c00a04: /* MXCC control register */
910 if (size == 4)
911 ret = env->mxccregs[3];
912 else
913 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
914 size);
915 break;
916 case 0x01c00c00: /* Module reset register */
917 if (size == 8) {
918 ret = env->mxccregs[5];
919 // should we do something here?
920 } else
921 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
922 size);
923 break;
924 case 0x01c00f00: /* MBus port address register */
925 if (size == 8)
926 ret = env->mxccregs[7];
927 else
928 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
929 size);
930 break;
931 default:
932 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
933 size);
934 break;
936 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
937 "addr = %08x -> ret = %08x,"
938 "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
939 #ifdef DEBUG_MXCC
940 dump_mxcc(env);
941 #endif
942 break;
943 case 3: /* MMU probe */
945 int mmulev;
947 mmulev = (addr >> 8) & 15;
948 if (mmulev > 4)
949 ret = 0;
950 else
951 ret = mmu_probe(env, addr, mmulev);
952 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
953 addr, mmulev, ret);
955 break;
956 case 4: /* read MMU regs */
958 int reg = (addr >> 8) & 0x1f;
960 ret = env->mmuregs[reg];
961 if (reg == 3) /* Fault status cleared on read */
962 env->mmuregs[3] = 0;
963 else if (reg == 0x13) /* Fault status read */
964 ret = env->mmuregs[3];
965 else if (reg == 0x14) /* Fault address read */
966 ret = env->mmuregs[4];
967 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
969 break;
970 case 5: // Turbosparc ITLB Diagnostic
971 case 6: // Turbosparc DTLB Diagnostic
972 case 7: // Turbosparc IOTLB Diagnostic
973 break;
974 case 9: /* Supervisor code access */
975 switch(size) {
976 case 1:
977 ret = ldub_code(addr);
978 break;
979 case 2:
980 ret = lduw_code(addr);
981 break;
982 default:
983 case 4:
984 ret = ldl_code(addr);
985 break;
986 case 8:
987 ret = ldq_code(addr);
988 break;
990 break;
991 case 0xa: /* User data access */
992 switch(size) {
993 case 1:
994 ret = ldub_user(addr);
995 break;
996 case 2:
997 ret = lduw_user(addr);
998 break;
999 default:
1000 case 4:
1001 ret = ldl_user(addr);
1002 break;
1003 case 8:
1004 ret = ldq_user(addr);
1005 break;
1007 break;
1008 case 0xb: /* Supervisor data access */
1009 switch(size) {
1010 case 1:
1011 ret = ldub_kernel(addr);
1012 break;
1013 case 2:
1014 ret = lduw_kernel(addr);
1015 break;
1016 default:
1017 case 4:
1018 ret = ldl_kernel(addr);
1019 break;
1020 case 8:
1021 ret = ldq_kernel(addr);
1022 break;
1024 break;
1025 case 0xc: /* I-cache tag */
1026 case 0xd: /* I-cache data */
1027 case 0xe: /* D-cache tag */
1028 case 0xf: /* D-cache data */
1029 break;
1030 case 0x20: /* MMU passthrough */
1031 switch(size) {
1032 case 1:
1033 ret = ldub_phys(addr);
1034 break;
1035 case 2:
1036 ret = lduw_phys(addr);
1037 break;
1038 default:
1039 case 4:
1040 ret = ldl_phys(addr);
1041 break;
1042 case 8:
1043 ret = ldq_phys(addr);
1044 break;
1046 break;
1047 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1048 switch(size) {
1049 case 1:
1050 ret = ldub_phys((target_phys_addr_t)addr
1051 | ((target_phys_addr_t)(asi & 0xf) << 32));
1052 break;
1053 case 2:
1054 ret = lduw_phys((target_phys_addr_t)addr
1055 | ((target_phys_addr_t)(asi & 0xf) << 32));
1056 break;
1057 default:
1058 case 4:
1059 ret = ldl_phys((target_phys_addr_t)addr
1060 | ((target_phys_addr_t)(asi & 0xf) << 32));
1061 break;
1062 case 8:
1063 ret = ldq_phys((target_phys_addr_t)addr
1064 | ((target_phys_addr_t)(asi & 0xf) << 32));
1065 break;
1067 break;
1068 case 0x30: // Turbosparc secondary cache diagnostic
1069 case 0x31: // Turbosparc RAM snoop
1070 case 0x32: // Turbosparc page table descriptor diagnostic
1071 case 0x39: /* data cache diagnostic register */
1072 ret = 0;
1073 break;
1074 case 8: /* User code access, XXX */
1075 default:
1076 do_unassigned_access(addr, 0, 0, asi);
1077 ret = 0;
1078 break;
1080 if (sign) {
1081 switch(size) {
1082 case 1:
1083 ret = (int8_t) ret;
1084 break;
1085 case 2:
1086 ret = (int16_t) ret;
1087 break;
1088 case 4:
1089 ret = (int32_t) ret;
1090 break;
1091 default:
1092 break;
1095 #ifdef DEBUG_ASI
1096 dump_asi("read ", last_addr, asi, size, ret);
1097 #endif
1098 return ret;
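/* Alternate-space stores mirror the loads above and additionally implement
   MMU flushes (ASI 3), MMU register writes (ASI 4), the MXCC stream
   copy engine, and the 32-byte block copy/fill ASIs (0x17, 0x1f). */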
1101 void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
1103 helper_check_align(addr, size - 1);
1104 switch(asi) {
1105 case 2: /* SuperSparc MXCC registers */
1106 switch (addr) {
1107 case 0x01c00000: /* MXCC stream data register 0 */
1108 if (size == 8)
1109 env->mxccdata[0] = val;
1110 else
1111 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1112 size);
1113 break;
1114 case 0x01c00008: /* MXCC stream data register 1 */
1115 if (size == 8)
1116 env->mxccdata[1] = val;
1117 else
1118 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1119 size);
1120 break;
1121 case 0x01c00010: /* MXCC stream data register 2 */
1122 if (size == 8)
1123 env->mxccdata[2] = val;
1124 else
1125 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1126 size);
1127 break;
1128 case 0x01c00018: /* MXCC stream data register 3 */
1129 if (size == 8)
1130 env->mxccdata[3] = val;
1131 else
1132 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1133 size);
1134 break;
1135 case 0x01c00100: /* MXCC stream source */
1136 if (size == 8)
1137 env->mxccregs[0] = val;
1138 else
1139 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1140 size);
1141 env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1142 0);
1143 env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1144 8);
1145 env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1146 16);
1147 env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1148 24);
1149 break;
1150 case 0x01c00200: /* MXCC stream destination */
1151 if (size == 8)
1152 env->mxccregs[1] = val;
1153 else
1154 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1155 size);
1156 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0,
1157 env->mxccdata[0]);
1158 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8,
1159 env->mxccdata[1]);
1160 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
1161 env->mxccdata[2]);
1162 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
1163 env->mxccdata[3]);
1164 break;
1165 case 0x01c00a00: /* MXCC control register */
1166 if (size == 8)
1167 env->mxccregs[3] = val;
1168 else
1169 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1170 size);
1171 break;
1172 case 0x01c00a04: /* MXCC control register */
1173 if (size == 4)
1174 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
1175 | val;
1176 else
1177 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1178 size);
1179 break;
1180 case 0x01c00e00: /* MXCC error register */
1181 // writing a 1 bit clears the error
1182 if (size == 8)
1183 env->mxccregs[6] &= ~val;
1184 else
1185 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1186 size);
1187 break;
1188 case 0x01c00f00: /* MBus port address register */
1189 if (size == 8)
1190 env->mxccregs[7] = val;
1191 else
1192 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1193 size);
1194 break;
1195 default:
1196 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
1197 size);
1198 break;
1200 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %08x\n", asi,
1201 size, addr, val);
1202 #ifdef DEBUG_MXCC
1203 dump_mxcc(env);
1204 #endif
1205 break;
1206 case 3: /* MMU flush */
1208 int mmulev;
1210 mmulev = (addr >> 8) & 15;
1211 DPRINTF_MMU("mmu flush level %d\n", mmulev);
1212 switch (mmulev) {
1213 case 0: // flush page
1214 tlb_flush_page(env, addr & 0xfffff000);
1215 break;
1216 case 1: // flush segment (256k)
1217 case 2: // flush region (16M)
1218 case 3: // flush context (4G)
1219 case 4: // flush entire
1220 tlb_flush(env, 1);
1221 break;
1222 default:
1223 break;
1225 #ifdef DEBUG_MMU
1226 dump_mmu(env);
1227 #endif
1229 break;
1230 case 4: /* write MMU regs */
1232 int reg = (addr >> 8) & 0x1f;
1233 uint32_t oldreg;
1235 oldreg = env->mmuregs[reg];
1236 switch(reg) {
1237 case 0: // Control Register
1238 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
1239 (val & 0x00ffffff);
1240 // Mappings generated during no-fault mode or MMU
1241 // disabled mode are invalid in normal mode
1242 if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
1243 (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
1244 tlb_flush(env, 1);
1245 break;
1246 case 1: // Context Table Pointer Register
1247 env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
1248 break;
1249 case 2: // Context Register
1250 env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
1251 if (oldreg != env->mmuregs[reg]) {
1252 /* we flush when the MMU context changes because
1253 QEMU has no MMU context support */
1254 tlb_flush(env, 1);
1256 break;
1257 case 3: // Synchronous Fault Status Register with Clear
1258 case 4: // Synchronous Fault Address Register
1259 break;
1260 case 0x10: // TLB Replacement Control Register
1261 env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
1262 break;
1263 case 0x13: // Synchronous Fault Status Register with Read and Clear
1264 env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
1265 break;
1266 case 0x14: // Synchronous Fault Address Register
1267 env->mmuregs[4] = val;
1268 break;
1269 default:
1270 env->mmuregs[reg] = val;
1271 break;
1273 if (oldreg != env->mmuregs[reg]) {
1274 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
1275 reg, oldreg, env->mmuregs[reg]);
1277 #ifdef DEBUG_MMU
1278 dump_mmu(env);
1279 #endif
1281 break;
1282 case 5: // Turbosparc ITLB Diagnostic
1283 case 6: // Turbosparc DTLB Diagnostic
1284 case 7: // Turbosparc IOTLB Diagnostic
1285 break;
1286 case 0xa: /* User data access */
1287 switch(size) {
1288 case 1:
1289 stb_user(addr, val);
1290 break;
1291 case 2:
1292 stw_user(addr, val);
1293 break;
1294 default:
1295 case 4:
1296 stl_user(addr, val);
1297 break;
1298 case 8:
1299 stq_user(addr, val);
1300 break;
1302 break;
1303 case 0xb: /* Supervisor data access */
1304 switch(size) {
1305 case 1:
1306 stb_kernel(addr, val);
1307 break;
1308 case 2:
1309 stw_kernel(addr, val);
1310 break;
1311 default:
1312 case 4:
1313 stl_kernel(addr, val);
1314 break;
1315 case 8:
1316 stq_kernel(addr, val);
1317 break;
1319 break;
1320 case 0xc: /* I-cache tag */
1321 case 0xd: /* I-cache data */
1322 case 0xe: /* D-cache tag */
1323 case 0xf: /* D-cache data */
1324 case 0x10: /* I/D-cache flush page */
1325 case 0x11: /* I/D-cache flush segment */
1326 case 0x12: /* I/D-cache flush region */
1327 case 0x13: /* I/D-cache flush context */
1328 case 0x14: /* I/D-cache flush user */
1329 break;
1330 case 0x17: /* Block copy, sta access */
1332 // val = src
1333 // addr = dst
1334 // copy 32 bytes
1335 unsigned int i;
1336 uint32_t src = val & ~3, dst = addr & ~3, temp;
1338 for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
1339 temp = ldl_kernel(src);
1340 stl_kernel(dst, temp);
1343 break;
1344 case 0x1f: /* Block fill, stda access */
1346 // addr = dst
1347 // fill 32 bytes with val
1348 unsigned int i;
1349 uint32_t dst = addr & ~7; /* align the destination down to 8 bytes */
1351 for (i = 0; i < 32; i += 8, dst += 8)
1352 stq_kernel(dst, val);
1354 break;
1355 case 0x20: /* MMU passthrough */
1357 switch(size) {
1358 case 1:
1359 stb_phys(addr, val);
1360 break;
1361 case 2:
1362 stw_phys(addr, val);
1363 break;
1364 case 4:
1365 default:
1366 stl_phys(addr, val);
1367 break;
1368 case 8:
1369 stq_phys(addr, val);
1370 break;
1373 break;
1374 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1376 switch(size) {
1377 case 1:
1378 stb_phys((target_phys_addr_t)addr
1379 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1380 break;
1381 case 2:
1382 stw_phys((target_phys_addr_t)addr
1383 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1384 break;
1385 case 4:
1386 default:
1387 stl_phys((target_phys_addr_t)addr
1388 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1389 break;
1390 case 8:
1391 stq_phys((target_phys_addr_t)addr
1392 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1393 break;
1396 break;
1397 case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
1398 case 0x31: // store buffer data, Ross RT620 I-cache flush or
1399 // Turbosparc snoop RAM
1400 case 0x32: // store buffer control or Turbosparc page table
1401 // descriptor diagnostic
1402 case 0x36: /* I-cache flash clear */
1403 case 0x37: /* D-cache flash clear */
1404 case 0x38: /* breakpoint diagnostics */
1405 case 0x4c: /* breakpoint action */
1406 break;
1407 case 8: /* User code access, XXX */
1408 case 9: /* Supervisor code access, XXX */
1409 default:
1410 do_unassigned_access(addr, 1, 0, asi);
1411 break;
1413 #ifdef DEBUG_ASI
1414 dump_asi("write", addr, asi, size, val);
1415 #endif
1418 #endif /* CONFIG_USER_ONLY */
1419 #else /* TARGET_SPARC64 */
1421 #ifdef CONFIG_USER_ONLY
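/* In user-emulation mode only the unprivileged ASIs (0x80 and above) are
   reachable; anything lower raises a privileged_action trap. */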
1422 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1424 uint64_t ret = 0;
1425 #if defined(DEBUG_ASI)
1426 target_ulong last_addr = addr;
1427 #endif
1429 if (asi < 0x80)
1430 raise_exception(TT_PRIV_ACT);
1432 helper_check_align(addr, size - 1);
1433 address_mask(env, &addr);
1435 switch (asi) {
1436 case 0x82: // Primary no-fault
1437 case 0x8a: // Primary no-fault LE
1438 if (page_check_range(addr, size, PAGE_READ) == -1) {
1439 #ifdef DEBUG_ASI
1440 dump_asi("read ", last_addr, asi, size, ret);
1441 #endif
1442 return 0;
1444 // Fall through
1445 case 0x80: // Primary
1446 case 0x88: // Primary LE
1448 switch(size) {
1449 case 1:
1450 ret = ldub_raw(addr);
1451 break;
1452 case 2:
1453 ret = lduw_raw(addr);
1454 break;
1455 case 4:
1456 ret = ldl_raw(addr);
1457 break;
1458 default:
1459 case 8:
1460 ret = ldq_raw(addr);
1461 break;
1464 break;
1465 case 0x83: // Secondary no-fault
1466 case 0x8b: // Secondary no-fault LE
1467 if (page_check_range(addr, size, PAGE_READ) == -1) {
1468 #ifdef DEBUG_ASI
1469 dump_asi("read ", last_addr, asi, size, ret);
1470 #endif
1471 return 0;
1473 // Fall through
1474 case 0x81: // Secondary
1475 case 0x89: // Secondary LE
1476 // XXX
1477 break;
1478 default:
1479 break;
1482 /* Convert from little endian */
1483 switch (asi) {
1484 case 0x88: // Primary LE
1485 case 0x89: // Secondary LE
1486 case 0x8a: // Primary no-fault LE
1487 case 0x8b: // Secondary no-fault LE
1488 switch(size) {
1489 case 2:
1490 ret = bswap16(ret);
1491 break;
1492 case 4:
1493 ret = bswap32(ret);
1494 break;
1495 case 8:
1496 ret = bswap64(ret);
1497 break;
1498 default:
1499 break;
1501 default:
1502 break;
1505 /* Convert to signed number */
1506 if (sign) {
1507 switch(size) {
1508 case 1:
1509 ret = (int8_t) ret;
1510 break;
1511 case 2:
1512 ret = (int16_t) ret;
1513 break;
1514 case 4:
1515 ret = (int32_t) ret;
1516 break;
1517 default:
1518 break;
1521 #ifdef DEBUG_ASI
1522 dump_asi("read ", last_addr, asi, size, ret);
1523 #endif
1524 return ret;
1527 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1529 #ifdef DEBUG_ASI
1530 dump_asi("write", addr, asi, size, val);
1531 #endif
1532 if (asi < 0x80)
1533 raise_exception(TT_PRIV_ACT);
1535 helper_check_align(addr, size - 1);
1536 address_mask(env, &addr);
1538 /* Convert to little endian */
1539 switch (asi) {
1540 case 0x88: // Primary LE
1541 case 0x89: // Secondary LE
1542 switch(size) {
1543 case 2:
1544 val = bswap16(val); /* byte-swap the stored value, not the address */
1545 break;
1546 case 4:
1547 val = bswap32(val);
1548 break;
1549 case 8:
1550 val = bswap64(val);
1551 break;
1552 default:
1553 break;
1555 default:
1556 break;
1559 switch(asi) {
1560 case 0x80: // Primary
1561 case 0x88: // Primary LE
1563 switch(size) {
1564 case 1:
1565 stb_raw(addr, val);
1566 break;
1567 case 2:
1568 stw_raw(addr, val);
1569 break;
1570 case 4:
1571 stl_raw(addr, val);
1572 break;
1573 case 8:
1574 default:
1575 stq_raw(addr, val);
1576 break;
1579 break;
1580 case 0x81: // Secondary
1581 case 0x89: // Secondary LE
1582 // XXX
1583 return;
1585 case 0x82: // Primary no-fault, RO
1586 case 0x83: // Secondary no-fault, RO
1587 case 0x8a: // Primary no-fault LE, RO
1588 case 0x8b: // Secondary no-fault LE, RO
1589 default:
1590 do_unassigned_access(addr, 1, 0, 1);
1591 return;
1595 #else /* CONFIG_USER_ONLY */
1597 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1599 uint64_t ret = 0;
1600 #if defined(DEBUG_ASI)
1601 target_ulong last_addr = addr;
1602 #endif
1604 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1605 || ((env->def->features & CPU_FEATURE_HYPV)
1606 && asi >= 0x30 && asi < 0x80
1607 && !(env->hpstate & HS_PRIV)))
1608 raise_exception(TT_PRIV_ACT);
1610 helper_check_align(addr, size - 1);
1611 switch (asi) {
1612 case 0x82: // Primary no-fault
1613 case 0x8a: // Primary no-fault LE
1614 if (cpu_get_phys_page_debug(env, addr) == -1ULL) {
1615 #ifdef DEBUG_ASI
1616 dump_asi("read ", last_addr, asi, size, ret);
1617 #endif
1618 return 0;
1620 // Fall through
1621 case 0x10: // As if user primary
1622 case 0x18: // As if user primary LE
1623 case 0x80: // Primary
1624 case 0x88: // Primary LE
1625 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1626 if ((env->def->features & CPU_FEATURE_HYPV)
1627 && env->hpstate & HS_PRIV) {
1628 switch(size) {
1629 case 1:
1630 ret = ldub_hypv(addr);
1631 break;
1632 case 2:
1633 ret = lduw_hypv(addr);
1634 break;
1635 case 4:
1636 ret = ldl_hypv(addr);
1637 break;
1638 default:
1639 case 8:
1640 ret = ldq_hypv(addr);
1641 break;
1643 } else {
1644 switch(size) {
1645 case 1:
1646 ret = ldub_kernel(addr);
1647 break;
1648 case 2:
1649 ret = lduw_kernel(addr);
1650 break;
1651 case 4:
1652 ret = ldl_kernel(addr);
1653 break;
1654 default:
1655 case 8:
1656 ret = ldq_kernel(addr);
1657 break;
1660 } else {
1661 switch(size) {
1662 case 1:
1663 ret = ldub_user(addr);
1664 break;
1665 case 2:
1666 ret = lduw_user(addr);
1667 break;
1668 case 4:
1669 ret = ldl_user(addr);
1670 break;
1671 default:
1672 case 8:
1673 ret = ldq_user(addr);
1674 break;
1677 break;
1678 case 0x14: // Bypass
1679 case 0x15: // Bypass, non-cacheable
1680 case 0x1c: // Bypass LE
1681 case 0x1d: // Bypass, non-cacheable LE
1683 switch(size) {
1684 case 1:
1685 ret = ldub_phys(addr);
1686 break;
1687 case 2:
1688 ret = lduw_phys(addr);
1689 break;
1690 case 4:
1691 ret = ldl_phys(addr);
1692 break;
1693 default:
1694 case 8:
1695 ret = ldq_phys(addr);
1696 break;
1698 break;
1700 case 0x24: // Nucleus quad LDD 128 bit atomic
1701 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
1702 // Only ldda allowed
1703 raise_exception(TT_ILL_INSN);
1704 return 0;
1705 case 0x83: // Secondary no-fault
1706 case 0x8b: // Secondary no-fault LE
1707 if (cpu_get_phys_page_debug(env, addr) == -1ULL) {
1708 #ifdef DEBUG_ASI
1709 dump_asi("read ", last_addr, asi, size, ret);
1710 #endif
1711 return 0;
1713 // Fall through
1714 case 0x04: // Nucleus
1715 case 0x0c: // Nucleus Little Endian (LE)
1716 case 0x11: // As if user secondary
1717 case 0x19: // As if user secondary LE
1718 case 0x4a: // UPA config
1719 case 0x81: // Secondary
1720 case 0x89: // Secondary LE
1721 // XXX
1722 break;
1723 case 0x45: // LSU
1724 ret = env->lsu;
1725 break;
1726 case 0x50: // I-MMU regs
1728 int reg = (addr >> 3) & 0xf;
1730 ret = env->immuregs[reg];
1731 break;
1733 case 0x51: // I-MMU 8k TSB pointer
1734 case 0x52: // I-MMU 64k TSB pointer
1735 // XXX
1736 break;
1737 case 0x55: // I-MMU data access
1739 int reg = (addr >> 3) & 0x3f;
1741 ret = env->itlb_tte[reg];
1742 break;
1744 case 0x56: // I-MMU tag read
1746 int reg = (addr >> 3) & 0x3f;
1748 ret = env->itlb_tag[reg];
1749 break;
1751 case 0x58: // D-MMU regs
1753 int reg = (addr >> 3) & 0xf;
1755 ret = env->dmmuregs[reg];
1756 break;
1758 case 0x5d: // D-MMU data access
1760 int reg = (addr >> 3) & 0x3f;
1762 ret = env->dtlb_tte[reg];
1763 break;
1765 case 0x5e: // D-MMU tag read
1767 int reg = (addr >> 3) & 0x3f;
1769 ret = env->dtlb_tag[reg];
1770 break;
1772 case 0x46: // D-cache data
1773 case 0x47: // D-cache tag access
1774 case 0x4b: // E-cache error enable
1775 case 0x4c: // E-cache asynchronous fault status
1776 case 0x4d: // E-cache asynchronous fault address
1777 case 0x4e: // E-cache tag data
1778 case 0x66: // I-cache instruction access
1779 case 0x67: // I-cache tag access
1780 case 0x6e: // I-cache predecode
1781 case 0x6f: // I-cache LRU etc.
1782 case 0x76: // E-cache tag
1783 case 0x7e: // E-cache tag
1784 break;
1785 case 0x59: // D-MMU 8k TSB pointer
1786 case 0x5a: // D-MMU 64k TSB pointer
1787 case 0x5b: // D-MMU data pointer
1788 case 0x48: // Interrupt dispatch, RO
1789 case 0x49: // Interrupt data receive
1790 case 0x7f: // Incoming interrupt vector, RO
1791 // XXX
1792 break;
1793 case 0x54: // I-MMU data in, WO
1794 case 0x57: // I-MMU demap, WO
1795 case 0x5c: // D-MMU data in, WO
1796 case 0x5f: // D-MMU demap, WO
1797 case 0x77: // Interrupt vector, WO
1798 default:
1799 do_unassigned_access(addr, 0, 0, 1);
1800 ret = 0;
1801 break;
1804 /* Convert from little endian */
1805 switch (asi) {
1806 case 0x0c: // Nucleus Little Endian (LE)
1807 case 0x18: // As if user primary LE
1808 case 0x19: // As if user secondary LE
1809 case 0x1c: // Bypass LE
1810 case 0x1d: // Bypass, non-cacheable LE
1811 case 0x88: // Primary LE
1812 case 0x89: // Secondary LE
1813 case 0x8a: // Primary no-fault LE
1814 case 0x8b: // Secondary no-fault LE
1815 switch(size) {
1816 case 2:
1817 ret = bswap16(ret);
1818 break;
1819 case 4:
1820 ret = bswap32(ret);
1821 break;
1822 case 8:
1823 ret = bswap64(ret);
1824 break;
1825 default:
1826 break;
1828 default:
1829 break;
1832 /* Convert to signed number */
1833 if (sign) {
1834 switch(size) {
1835 case 1:
1836 ret = (int8_t) ret;
1837 break;
1838 case 2:
1839 ret = (int16_t) ret;
1840 break;
1841 case 4:
1842 ret = (int32_t) ret;
1843 break;
1844 default:
1845 break;
1848 #ifdef DEBUG_ASI
1849 dump_asi("read ", last_addr, asi, size, ret);
1850 #endif
1851 return ret;
1854 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1856 #ifdef DEBUG_ASI
1857 dump_asi("write", addr, asi, size, val);
1858 #endif
1859 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1860 || ((env->def->features & CPU_FEATURE_HYPV)
1861 && asi >= 0x30 && asi < 0x80
1862 && !(env->hpstate & HS_PRIV)))
1863 raise_exception(TT_PRIV_ACT);
1865 helper_check_align(addr, size - 1);
1866 /* Convert to little endian */
1867 switch (asi) {
1868 case 0x0c: // Nucleus Little Endian (LE)
1869 case 0x18: // As if user primary LE
1870 case 0x19: // As if user secondary LE
1871 case 0x1c: // Bypass LE
1872 case 0x1d: // Bypass, non-cacheable LE
1873 case 0x88: // Primary LE
1874 case 0x89: // Secondary LE
1875 switch(size) {
1876 case 2:
1877 val = bswap16(val); /* byte-swap the stored value, not the address */
1878 break;
1879 case 4:
1880 val = bswap32(val);
1881 break;
1882 case 8:
1883 val = bswap64(val);
1884 break;
1885 default:
1886 break;
1888 default:
1889 break;
1892 switch(asi) {
1893 case 0x10: // As if user primary
1894 case 0x18: // As if user primary LE
1895 case 0x80: // Primary
1896 case 0x88: // Primary LE
1897 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1898 if ((env->def->features & CPU_FEATURE_HYPV)
1899 && env->hpstate & HS_PRIV) {
1900 switch(size) {
1901 case 1:
1902 stb_hypv(addr, val);
1903 break;
1904 case 2:
1905 stw_hypv(addr, val);
1906 break;
1907 case 4:
1908 stl_hypv(addr, val);
1909 break;
1910 case 8:
1911 default:
1912 stq_hypv(addr, val);
1913 break;
1915 } else {
1916 switch(size) {
1917 case 1:
1918 stb_kernel(addr, val);
1919 break;
1920 case 2:
1921 stw_kernel(addr, val);
1922 break;
1923 case 4:
1924 stl_kernel(addr, val);
1925 break;
1926 case 8:
1927 default:
1928 stq_kernel(addr, val);
1929 break;
1932 } else {
1933 switch(size) {
1934 case 1:
1935 stb_user(addr, val);
1936 break;
1937 case 2:
1938 stw_user(addr, val);
1939 break;
1940 case 4:
1941 stl_user(addr, val);
1942 break;
1943 case 8:
1944 default:
1945 stq_user(addr, val);
1946 break;
1949 break;
1950 case 0x14: // Bypass
1951 case 0x15: // Bypass, non-cacheable
1952 case 0x1c: // Bypass LE
1953 case 0x1d: // Bypass, non-cacheable LE
1955 switch(size) {
1956 case 1:
1957 stb_phys(addr, val);
1958 break;
1959 case 2:
1960 stw_phys(addr, val);
1961 break;
1962 case 4:
1963 stl_phys(addr, val);
1964 break;
1965 case 8:
1966 default:
1967 stq_phys(addr, val);
1968 break;
1971 return;
1972 case 0x24: // Nucleus quad LDD 128 bit atomic
1973 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
1974 // Only ldda allowed
1975 raise_exception(TT_ILL_INSN);
1976 return;
1977 case 0x04: // Nucleus
1978 case 0x0c: // Nucleus Little Endian (LE)
1979 case 0x11: // As if user secondary
1980 case 0x19: // As if user secondary LE
1981 case 0x4a: // UPA config
1982 case 0x81: // Secondary
1983 case 0x89: // Secondary LE
1984 // XXX
1985 return;
1986 case 0x45: // LSU
1988 uint64_t oldreg;
1990 oldreg = env->lsu;
1991 env->lsu = val & (DMMU_E | IMMU_E);
1992 // Mappings generated during D/I MMU disabled mode are
1993 // invalid in normal mode
1994 if (oldreg != env->lsu) {
1995 DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
1996 oldreg, env->lsu);
1997 #ifdef DEBUG_MMU
1998 dump_mmu(env);
1999 #endif
2000 tlb_flush(env, 1);
2002 return;
2004 case 0x50: // I-MMU regs
2006 int reg = (addr >> 3) & 0xf;
2007 uint64_t oldreg;
2009 oldreg = env->immuregs[reg];
2010 switch(reg) {
2011 case 0: // RO
2012 case 4:
2013 return;
2014 case 1: // Not in I-MMU
2015 case 2:
2016 case 7:
2017 case 8:
2018 return;
2019 case 3: // SFSR
2020 if ((val & 1) == 0)
2021 val = 0; // Clear SFSR
2022 break;
2023 case 5: // TSB access
2024 case 6: // Tag access
2025 default:
2026 break;
2028 env->immuregs[reg] = val;
2029 if (oldreg != env->immuregs[reg]) {
2030 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
2031 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
2033 #ifdef DEBUG_MMU
2034 dump_mmu(env);
2035 #endif
2036 return;
2038 case 0x54: // I-MMU data in
2040 unsigned int i;
2042 // Try finding an invalid entry
2043 for (i = 0; i < 64; i++) {
2044 if ((env->itlb_tte[i] & 0x8000000000000000ULL) == 0) {
2045 env->itlb_tag[i] = env->immuregs[6];
2046 env->itlb_tte[i] = val;
2047 return;
2050 // Try finding an unlocked entry
2051 for (i = 0; i < 64; i++) {
2052 if ((env->itlb_tte[i] & 0x40) == 0) {
2053 env->itlb_tag[i] = env->immuregs[6];
2054 env->itlb_tte[i] = val;
2055 return;
2058 // error state?
2059 return;
2061 case 0x55: // I-MMU data access
2063 unsigned int i = (addr >> 3) & 0x3f;
2065 env->itlb_tag[i] = env->immuregs[6];
2066 env->itlb_tte[i] = val;
2067 return;
2069 case 0x57: // I-MMU demap
2070 // XXX
2071 return;
2072 case 0x58: // D-MMU regs
2074 int reg = (addr >> 3) & 0xf;
2075 uint64_t oldreg;
2077 oldreg = env->dmmuregs[reg];
2078 switch(reg) {
2079 case 0: // RO
2080 case 4:
2081 return;
2082 case 3: // SFSR
2083 if ((val & 1) == 0) {
2084 val = 0; // Clear SFSR, Fault address
2085 env->dmmuregs[4] = 0;
2087 env->dmmuregs[reg] = val;
2088 break;
2089 case 1: // Primary context
2090 case 2: // Secondary context
2091 case 5: // TSB access
2092 case 6: // Tag access
2093 case 7: // Virtual Watchpoint
2094 case 8: // Physical Watchpoint
2095 default:
2096 break;
2098 env->dmmuregs[reg] = val;
2099 if (oldreg != env->dmmuregs[reg]) {
2100 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
2101 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
2103 #ifdef DEBUG_MMU
2104 dump_mmu(env);
2105 #endif
2106 return;
2108 case 0x5c: // D-MMU data in
2110 unsigned int i;
2112 // Try finding an invalid entry
2113 for (i = 0; i < 64; i++) {
2114 if ((env->dtlb_tte[i] & 0x8000000000000000ULL) == 0) {
2115 env->dtlb_tag[i] = env->dmmuregs[6];
2116 env->dtlb_tte[i] = val;
2117 return;
2120 // Try finding an unlocked entry
2121 for (i = 0; i < 64; i++) {
2122 if ((env->dtlb_tte[i] & 0x40) == 0) {
2123 env->dtlb_tag[i] = env->dmmuregs[6];
2124 env->dtlb_tte[i] = val;
2125 return;
2128 // error state?
2129 return;
2131 case 0x5d: // D-MMU data access
2133 unsigned int i = (addr >> 3) & 0x3f;
2135 env->dtlb_tag[i] = env->dmmuregs[6];
2136 env->dtlb_tte[i] = val;
2137 return;
2139 case 0x5f: // D-MMU demap
2140 case 0x49: // Interrupt data receive
2141 // XXX
2142 return;
2143 case 0x46: // D-cache data
2144 case 0x47: // D-cache tag access
2145 case 0x4b: // E-cache error enable
2146 case 0x4c: // E-cache asynchronous fault status
2147 case 0x4d: // E-cache asynchronous fault address
2148 case 0x4e: // E-cache tag data
2149 case 0x66: // I-cache instruction access
2150 case 0x67: // I-cache tag access
2151 case 0x6e: // I-cache predecode
2152 case 0x6f: // I-cache LRU etc.
2153 case 0x76: // E-cache tag
2154 case 0x7e: // E-cache tag
2155 return;
2156 case 0x51: // I-MMU 8k TSB pointer, RO
2157 case 0x52: // I-MMU 64k TSB pointer, RO
2158 case 0x56: // I-MMU tag read, RO
2159 case 0x59: // D-MMU 8k TSB pointer, RO
2160 case 0x5a: // D-MMU 64k TSB pointer, RO
2161 case 0x5b: // D-MMU data pointer, RO
2162 case 0x5e: // D-MMU tag read, RO
2163 case 0x48: // Interrupt dispatch, RO
2164 case 0x7f: // Incoming interrupt vector, RO
2165 case 0x82: // Primary no-fault, RO
2166 case 0x83: // Secondary no-fault, RO
2167 case 0x8a: // Primary no-fault LE, RO
2168 case 0x8b: // Secondary no-fault LE, RO
2169 default:
2170 do_unassigned_access(addr, 1, 0, 1);
2171 return;
2174 #endif /* CONFIG_USER_ONLY */
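/* LDDA: the quad-load ASIs (0x24/0x2c) fetch 16 bytes atomically into a
   register pair (byte-swapped for the LE variant); every other ASI is
   emulated as two 32-bit helper_ld_asi() accesses. */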
2176 void helper_ldda_asi(target_ulong addr, int asi, int rd)
2178 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2179 || ((env->def->features & CPU_FEATURE_HYPV)
2180 && asi >= 0x30 && asi < 0x80
2181 && !(env->hpstate & HS_PRIV)))
2182 raise_exception(TT_PRIV_ACT);
2184 switch (asi) {
2185 case 0x24: // Nucleus quad LDD 128 bit atomic
2186 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
2187 helper_check_align(addr, 0xf);
2188 if (rd == 0) {
2189 env->gregs[1] = ldq_kernel(addr + 8);
2190 if (asi == 0x2c)
2191 bswap64s(&env->gregs[1]);
2192 } else if (rd < 8) {
2193 env->gregs[rd] = ldq_kernel(addr);
2194 env->gregs[rd + 1] = ldq_kernel(addr + 8);
2195 if (asi == 0x2c) {
2196 bswap64s(&env->gregs[rd]);
2197 bswap64s(&env->gregs[rd + 1]);
2199 } else {
2200 env->regwptr[rd] = ldq_kernel(addr);
2201 env->regwptr[rd + 1] = ldq_kernel(addr + 8);
2202 if (asi == 0x2c) {
2203 bswap64s(&env->regwptr[rd]);
2204 bswap64s(&env->regwptr[rd + 1]);
2207 break;
2208 default:
2209 helper_check_align(addr, 0x3);
2210 if (rd == 0)
2211 env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
2212 else if (rd < 8) {
2213 env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
2214 env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
2215 } else {
2216 env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
2217 env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
2219 break;
2223 void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
2225 unsigned int i;
2226 target_ulong val;
2228 helper_check_align(addr, 3);
2229 switch (asi) {
2230 case 0xf0: // Block load primary
2231 case 0xf1: // Block load secondary
2232 case 0xf8: // Block load primary LE
2233 case 0xf9: // Block load secondary LE
2234 if (rd & 7) {
2235 raise_exception(TT_ILL_INSN);
2236 return;
2238 helper_check_align(addr, 0x3f);
2239 for (i = 0; i < 16; i++) {
2240 *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
2241 0);
2242 addr += 4;
2245 return;
2246 default:
2247 break;
2250 val = helper_ld_asi(addr, asi, size, 0);
2251 switch(size) {
2252 default:
2253 case 4:
2254 *((uint32_t *)&env->fpr[rd]) = val;
2255 break;
2256 case 8:
2257 *((int64_t *)&DT0) = val;
2258 break;
2259 case 16:
2260 // XXX
2261 break;
2265 void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
2267 unsigned int i;
2268 target_ulong val = 0;
2270 helper_check_align(addr, 3);
2271 switch (asi) {
2272 case 0xf0: // Block store primary
2273 case 0xf1: // Block store secondary
2274 case 0xf8: // Block store primary LE
2275 case 0xf9: // Block store secondary LE
2276 if (rd & 7) {
2277 raise_exception(TT_ILL_INSN);
2278 return;
2280 helper_check_align(addr, 0x3f);
2281 for (i = 0; i < 16; i++) {
2282 val = *(uint32_t *)&env->fpr[rd++];
2283 helper_st_asi(addr, val, asi & 0x8f, 4);
2284 addr += 4;
2287 return;
2288 default:
2289 break;
2292 switch(size) {
2293 default:
2294 case 4:
2295 val = *((uint32_t *)&env->fpr[rd]);
2296 break;
2297 case 8:
2298 val = *((int64_t *)&DT0);
2299 break;
2300 case 16:
2301 // XXX
2302 break;
2304 helper_st_asi(addr, val, asi, size);
2307 target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
2308 target_ulong val2, uint32_t asi)
2310 target_ulong ret;
2312 val1 &= 0xffffffffUL;
2313 ret = helper_ld_asi(addr, asi, 4, 0);
2314 ret &= 0xffffffffUL;
2315 if (val1 == ret)
2316 helper_st_asi(addr, val2 & 0xffffffffUL, asi, 4);
2317 return ret;
2320 target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
2321 target_ulong val2, uint32_t asi)
2323 target_ulong ret;
2325 ret = helper_ld_asi(addr, asi, 8, 0);
2326 if (val1 == ret)
2327 helper_st_asi(addr, val2, asi, 8);
2328 return ret;
2330 #endif /* TARGET_SPARC64 */
2332 #ifndef TARGET_SPARC64
2333 void helper_rett(void)
2335 unsigned int cwp;
2337 if (env->psret == 1)
2338 raise_exception(TT_ILL_INSN);
2340 env->psret = 1;
2341 cwp = cpu_cwp_inc(env, env->cwp + 1) ;
2342 if (env->wim & (1 << cwp)) {
2343 raise_exception(TT_WIN_UNF);
2345 set_cwp(cwp);
2346 env->psrs = env->psrps;
2348 #endif
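/* UDIV/SDIV: the 64-bit dividend is formed from %y (high word) and the first
   operand (low word).  On overflow the result saturates and cc_src2 records
   the overflow condition for the translator to fold into the condition codes
   for the CC-setting forms. */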
2350 target_ulong helper_udiv(target_ulong a, target_ulong b)
2352 uint64_t x0;
2353 uint32_t x1;
2355 x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
2356 x1 = b;
2358 if (x1 == 0) {
2359 raise_exception(TT_DIV_ZERO);
2362 x0 = x0 / x1;
2363 if (x0 > 0xffffffff) {
2364 env->cc_src2 = 1;
2365 return 0xffffffff;
2366 } else {
2367 env->cc_src2 = 0;
2368 return x0;
2372 target_ulong helper_sdiv(target_ulong a, target_ulong b)
2374 int64_t x0;
2375 int32_t x1;
2377 x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
2378 x1 = b;
2380 if (x1 == 0) {
2381 raise_exception(TT_DIV_ZERO);
2384 x0 = x0 / x1;
2385 if ((int32_t) x0 != x0) {
2386 env->cc_src2 = 1;
2387 return x0 < 0? 0x80000000: 0x7fffffff;
2388 } else {
2389 env->cc_src2 = 0;
2390 return x0;
2394 uint64_t helper_pack64(target_ulong high, target_ulong low)
2396 return ((uint64_t)high << 32) | (uint64_t)(low & 0xffffffff);
2399 void helper_stdf(target_ulong addr, int mem_idx)
2401 helper_check_align(addr, 7);
2402 #if !defined(CONFIG_USER_ONLY)
2403 switch (mem_idx) {
2404 case 0:
2405 stfq_user(addr, DT0);
2406 break;
2407 case 1:
2408 stfq_kernel(addr, DT0);
2409 break;
2410 #ifdef TARGET_SPARC64
2411 case 2:
2412 stfq_hypv(addr, DT0);
2413 break;
2414 #endif
2415 default:
2416 break;
2418 #else
2419 address_mask(env, &addr);
2420 stfq_raw(addr, DT0);
2421 #endif
2424 void helper_lddf(target_ulong addr, int mem_idx)
2426 helper_check_align(addr, 7);
2427 #if !defined(CONFIG_USER_ONLY)
2428 switch (mem_idx) {
2429 case 0:
2430 DT0 = ldfq_user(addr);
2431 break;
2432 case 1:
2433 DT0 = ldfq_kernel(addr);
2434 break;
2435 #ifdef TARGET_SPARC64
2436 case 2:
2437 DT0 = ldfq_hypv(addr);
2438 break;
2439 #endif
2440 default:
2441 break;
2443 #else
2444 address_mask(env, &addr);
2445 DT0 = ldfq_raw(addr);
2446 #endif
2449 void helper_ldqf(target_ulong addr, int mem_idx)
2451 // XXX add 128 bit load
2452 CPU_QuadU u;
2454 helper_check_align(addr, 7);
2455 #if !defined(CONFIG_USER_ONLY)
2456 switch (mem_idx) {
2457 case 0:
2458 u.ll.upper = ldq_user(addr);
2459 u.ll.lower = ldq_user(addr + 8);
2460 QT0 = u.q;
2461 break;
2462 case 1:
2463 u.ll.upper = ldq_kernel(addr);
2464 u.ll.lower = ldq_kernel(addr + 8);
2465 QT0 = u.q;
2466 break;
2467 #ifdef TARGET_SPARC64
2468 case 2:
2469 u.ll.upper = ldq_hypv(addr);
2470 u.ll.lower = ldq_hypv(addr + 8);
2471 QT0 = u.q;
2472 break;
2473 #endif
2474 default:
2475 break;
2477 #else
2478 address_mask(env, &addr);
2479 u.ll.upper = ldq_raw(addr);
2480 u.ll.lower = ldq_raw((addr + 8) & 0xffffffffULL);
2481 QT0 = u.q;
2482 #endif
2485 void helper_stqf(target_ulong addr, int mem_idx)
2487 // XXX add 128 bit store
2488 CPU_QuadU u;
2490 helper_check_align(addr, 7);
2491 #if !defined(CONFIG_USER_ONLY)
2492 switch (mem_idx) {
2493 case 0:
2494 u.q = QT0;
2495 stq_user(addr, u.ll.upper);
2496 stq_user(addr + 8, u.ll.lower);
2497 break;
2498 case 1:
2499 u.q = QT0;
2500 stq_kernel(addr, u.ll.upper);
2501 stq_kernel(addr + 8, u.ll.lower);
2502 break;
2503 #ifdef TARGET_SPARC64
2504 case 2:
2505 u.q = QT0;
2506 stq_hypv(addr, u.ll.upper);
2507 stq_hypv(addr + 8, u.ll.lower);
2508 break;
2509 #endif
2510 default:
2511 break;
2513 #else
2514 u.q = QT0;
2515 address_mask(env, &addr);
2516 stq_raw(addr, u.ll.upper);
2517 stq_raw((addr + 8) & 0xffffffffULL, u.ll.lower);
2518 #endif
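/* set_fsr() maps the FSR rounding-direction field (FSR.RD) onto the
   corresponding softfloat rounding mode; it is called after every
   LDFSR/LDXFSR so subsequent FP helpers round as the guest requested. */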
2521 static inline void set_fsr(void)
2523 int rnd_mode;
2525 switch (env->fsr & FSR_RD_MASK) {
2526 case FSR_RD_NEAREST:
2527 rnd_mode = float_round_nearest_even;
2528 break;
2529 default:
2530 case FSR_RD_ZERO:
2531 rnd_mode = float_round_to_zero;
2532 break;
2533 case FSR_RD_POS:
2534 rnd_mode = float_round_up;
2535 break;
2536 case FSR_RD_NEG:
2537 rnd_mode = float_round_down;
2538 break;
2540 set_float_rounding_mode(rnd_mode, &env->fp_status);
2543 void helper_ldfsr(uint32_t new_fsr)
2545 env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK);
2546 set_fsr();
2549 #ifdef TARGET_SPARC64
2550 void helper_ldxfsr(uint64_t new_fsr)
2552 env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK);
2553 set_fsr();
2555 #endif
2557 void helper_debug(void)
2559 env->exception_index = EXCP_DEBUG;
2560 cpu_loop_exit();
2563 #ifndef TARGET_SPARC64
2564 /* XXX: use another pointer for %iN registers to avoid slow wrapping
2565 handling ? */
2566 void helper_save(void)
2568 uint32_t cwp;
2570 cwp = cpu_cwp_dec(env, env->cwp - 1);
2571 if (env->wim & (1 << cwp)) {
2572 raise_exception(TT_WIN_OVF);
2574 set_cwp(cwp);
2577 void helper_restore(void)
2579 uint32_t cwp;
2581 cwp = cpu_cwp_inc(env, env->cwp + 1);
2582 if (env->wim & (1 << cwp)) {
2583 raise_exception(TT_WIN_UNF);
2585 set_cwp(cwp);
2588 void helper_wrpsr(target_ulong new_psr)
2590 if ((new_psr & PSR_CWP) >= env->nwindows)
2591 raise_exception(TT_ILL_INSN);
2592 else
2593 PUT_PSR(env, new_psr);
2596 target_ulong helper_rdpsr(void)
2598 return GET_PSR(env);
2601 #else
2602 /* XXX: use another pointer for %iN registers to avoid slow wrapping
2603 handling ? */
2604 void helper_save(void)
2606 uint32_t cwp;
2608 cwp = cpu_cwp_dec(env, env->cwp - 1);
2609 if (env->cansave == 0) {
2610 raise_exception(TT_SPILL | (env->otherwin != 0 ?
2611 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2612 ((env->wstate & 0x7) << 2)));
2613 } else {
2614 if (env->cleanwin - env->canrestore == 0) {
2615 // XXX Clean windows without trap
2616 raise_exception(TT_CLRWIN);
2617 } else {
2618 env->cansave--;
2619 env->canrestore++;
2620 set_cwp(cwp);
2621 }
2622 }
2623 }
2625 void helper_restore(void)
2626 {
2627 uint32_t cwp;
2629 cwp = cpu_cwp_inc(env, env->cwp + 1);
2630 if (env->canrestore == 0) {
2631 raise_exception(TT_FILL | (env->otherwin != 0 ?
2632 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2633 ((env->wstate & 0x7) << 2)));
2634 } else {
2635 env->cansave++;
2636 env->canrestore--;
2637 set_cwp(cwp);
2638 }
2639 }
2641 void helper_flushw(void)
2642 {
2643 if (env->cansave != env->nwindows - 2) {
2644 raise_exception(TT_SPILL | (env->otherwin != 0 ?
2645 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2646 ((env->wstate & 0x7) << 2)));
2647 }
2648 }
2650 void helper_saved(void)
2651 {
2652 env->cansave++;
2653 if (env->otherwin == 0)
2654 env->canrestore--;
2655 else
2656 env->otherwin--;
2657 }
2659 void helper_restored(void)
2660 {
2661 env->canrestore++;
2662 if (env->cleanwin < env->nwindows - 1)
2663 env->cleanwin++;
2664 if (env->otherwin == 0)
2665 env->cansave--;
2666 else
2667 env->otherwin--;
2668 }
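/* Note (editorial addition, not in the original source): the V9 window
   helpers above maintain the architectural invariant
       CANSAVE + CANRESTORE + OTHERWIN == NWINDOWS - 2.
   SAVED/RESTORED are issued by spill/fill trap handlers to account for a
   window that has just been written to or reloaded from memory, and
   helper_flushw() keeps raising spill traps until CANSAVE == NWINDOWS - 2,
   i.e. until every window other than the current and the overlap window
   has been flushed. */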
2670 target_ulong helper_rdccr(void)
2671 {
2672 return GET_CCR(env);
2673 }
2675 void helper_wrccr(target_ulong new_ccr)
2676 {
2677 PUT_CCR(env, new_ccr);
2678 }
2680 // CWP handling is reversed in V9, but we still use the V8 register
2681 // order.
2682 target_ulong helper_rdcwp(void)
2683 {
2684 return GET_CWP64(env);
2685 }
2687 void helper_wrcwp(target_ulong new_cwp)
2688 {
2689 PUT_CWP64(env, new_cwp);
2690 }
2692 // This macro uses non-native bit order (bit 0 is the MSB of the 64-bit value)
2693 #define GET_FIELD(X, FROM, TO) \
2694 ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))
2696 // This macro uses the bit order of the manuals, i.e. bit 0 is 2^0
2697 #define GET_FIELD_SP(X, FROM, TO) \
2698 GET_FIELD(X, 63 - (TO), 63 - (FROM))
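/* Note (editorial addition, not in the original source): GET_FIELD numbers
   bits with bit 0 as the most significant bit of the 64-bit value, while
   GET_FIELD_SP converts back to the SPARC manual's numbering where bit 0 is
   the least significant bit.  Expanding the macros,
       GET_FIELD_SP(x, 13, 16) == (x >> 13) & 0xf
   so, for example, GET_FIELD_SP(0x12000ULL, 13, 16) == 0x9. */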
2700 target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
2701 {
2702 return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
2703 (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
2704 (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
2705 (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
2706 (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
2707 (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
2708 (((pixel_addr >> 55) & 1) << 4) |
2709 (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
2710 GET_FIELD_SP(pixel_addr, 11, 12);
2711 }
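/* Note (editorial addition, not in the original source): helper_array8
   implements the VIS ARRAY address computation.  pixel_addr packs
   fixed-point x, y and z coordinates, and the field shuffling above
   interleaves pieces of them into a blocked ("tiled") offset so that
   elements that are neighbours in 3-D space tend to share cache lines;
   cubesize (taken from rs2) widens the variable-sized fields. */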
2713 target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
2714 {
2715 uint64_t tmp;
2717 tmp = addr + offset;
2718 env->gsr &= ~7ULL;
2719 env->gsr |= tmp & 7ULL;
2720 return tmp & ~7ULL;
2721 }
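/* Note (editorial addition, not in the original source): ALIGNADDRESS
   computes addr + offset, records the low three bits in GSR.align and
   returns the 8-byte-aligned base; a subsequent FALIGNDATA uses GSR.align
   to assemble the misaligned datum from two aligned 64-bit loads.  Worked
   example from the code above: addr = 0x1003, offset = 0 gives a return
   value of 0x1000 and GSR.align = 3. */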
2723 target_ulong helper_popc(target_ulong val)
2724 {
2725 return ctpop64(val);
2726 }
2728 static inline uint64_t *get_gregset(uint64_t pstate)
2729 {
2730 switch (pstate) {
2731 default:
2732 case 0:
2733 return env->bgregs;
2734 case PS_AG:
2735 return env->agregs;
2736 case PS_MG:
2737 return env->mgregs;
2738 case PS_IG:
2739 return env->igregs;
2740 }
2741 }
2743 static inline void change_pstate(uint64_t new_pstate)
2744 {
2745 uint64_t pstate_regs, new_pstate_regs;
2746 uint64_t *src, *dst;
2748 pstate_regs = env->pstate & 0xc01;
2749 new_pstate_regs = new_pstate & 0xc01;
2750 if (new_pstate_regs != pstate_regs) {
2751 // Switch global register bank
2752 src = get_gregset(new_pstate_regs);
2753 dst = get_gregset(pstate_regs);
2754 memcpy32(dst, env->gregs);
2755 memcpy32(env->gregs, src);
2756 }
2757 env->pstate = new_pstate;
2758 }
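/* Note (editorial addition, not in the original source): UltraSPARC has four
   banks of the eight global registers (normal, alternate, MMU and interrupt
   globals).  The 0xc01 mask isolates the PS_AG, PS_MG and PS_IG bits of
   PSTATE (assuming the usual QEMU values PS_AG = 0x001, PS_MG = 0x400,
   PS_IG = 0x800); when those bits change, change_pstate() copies the
   currently visible globals out to their bank and the newly selected bank
   into env->gregs. */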
2760 void helper_wrpstate(target_ulong new_state)
2761 {
2762 if (!(env->def->features & CPU_FEATURE_GL))
2763 change_pstate(new_state & 0xf3f);
2764 }
2766 void helper_done(void)
2767 {
2768 env->pc = env->tsptr->tpc;
2769 env->npc = env->tsptr->tnpc + 4;
2770 PUT_CCR(env, env->tsptr->tstate >> 32);
2771 env->asi = (env->tsptr->tstate >> 24) & 0xff;
2772 change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
2773 PUT_CWP64(env, env->tsptr->tstate & 0xff);
2774 env->tl--;
2775 env->tsptr = &env->ts[env->tl & MAXTL_MASK];
2776 }
2778 void helper_retry(void)
2779 {
2780 env->pc = env->tsptr->tpc;
2781 env->npc = env->tsptr->tnpc;
2782 PUT_CCR(env, env->tsptr->tstate >> 32);
2783 env->asi = (env->tsptr->tstate >> 24) & 0xff;
2784 change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
2785 PUT_CWP64(env, env->tsptr->tstate & 0xff);
2786 env->tl--;
2787 env->tsptr = &env->ts[env->tl & MAXTL_MASK];
2788 }
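/* Note (editorial addition, not in the original source): DONE and RETRY both
   pop one trap level, restoring CCR, ASI, PSTATE and CWP from the saved
   TSTATE word (packed by do_interrupt() below as CCR in bits 39:32, ASI in
   31:24, PSTATE in 19:8 and CWP in 4:0).  The only difference is the return
   point: RETRY re-executes the trapped instruction (pc = TPC, npc = TNPC)
   while DONE skips it (pc = TNPC, npc = TNPC + 4). */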
2789 #endif
2791 void helper_flush(target_ulong addr)
2792 {
2793 addr &= ~7;
2794 tb_invalidate_page_range(addr, addr + 8);
2795 }
2797 #ifdef TARGET_SPARC64
2798 #ifdef DEBUG_PCALL
2799 static const char * const excp_names[0x80] = {
2800 [TT_TFAULT] = "Instruction Access Fault",
2801 [TT_TMISS] = "Instruction Access MMU Miss",
2802 [TT_CODE_ACCESS] = "Instruction Access Error",
2803 [TT_ILL_INSN] = "Illegal Instruction",
2804 [TT_PRIV_INSN] = "Privileged Instruction",
2805 [TT_NFPU_INSN] = "FPU Disabled",
2806 [TT_FP_EXCP] = "FPU Exception",
2807 [TT_TOVF] = "Tag Overflow",
2808 [TT_CLRWIN] = "Clean Windows",
2809 [TT_DIV_ZERO] = "Division By Zero",
2810 [TT_DFAULT] = "Data Access Fault",
2811 [TT_DMISS] = "Data Access MMU Miss",
2812 [TT_DATA_ACCESS] = "Data Access Error",
2813 [TT_DPROT] = "Data Protection Error",
2814 [TT_UNALIGNED] = "Unaligned Memory Access",
2815 [TT_PRIV_ACT] = "Privileged Action",
2816 [TT_EXTINT | 0x1] = "External Interrupt 1",
2817 [TT_EXTINT | 0x2] = "External Interrupt 2",
2818 [TT_EXTINT | 0x3] = "External Interrupt 3",
2819 [TT_EXTINT | 0x4] = "External Interrupt 4",
2820 [TT_EXTINT | 0x5] = "External Interrupt 5",
2821 [TT_EXTINT | 0x6] = "External Interrupt 6",
2822 [TT_EXTINT | 0x7] = "External Interrupt 7",
2823 [TT_EXTINT | 0x8] = "External Interrupt 8",
2824 [TT_EXTINT | 0x9] = "External Interrupt 9",
2825 [TT_EXTINT | 0xa] = "External Interrupt 10",
2826 [TT_EXTINT | 0xb] = "External Interrupt 11",
2827 [TT_EXTINT | 0xc] = "External Interrupt 12",
2828 [TT_EXTINT | 0xd] = "External Interrupt 13",
2829 [TT_EXTINT | 0xe] = "External Interrupt 14",
2830 [TT_EXTINT | 0xf] = "External Interrupt 15",
2831 };
2832 #endif
2834 void do_interrupt(CPUState *env)
2835 {
2836 int intno = env->exception_index;
2838 #ifdef DEBUG_PCALL
2839 if (loglevel & CPU_LOG_INT) {
2840 static int count;
2841 const char *name;
2843 if (intno < 0 || intno >= 0x180)
2844 name = "Unknown";
2845 else if (intno >= 0x100)
2846 name = "Trap Instruction";
2847 else if (intno >= 0xc0)
2848 name = "Window Fill";
2849 else if (intno >= 0x80)
2850 name = "Window Spill";
2851 else {
2852 name = excp_names[intno];
2853 if (!name)
2854 name = "Unknown";
2855 }
2857 fprintf(logfile, "%6d: %s (v=%04x) pc=%016" PRIx64 " npc=%016" PRIx64
2858 " SP=%016" PRIx64 "\n",
2859 count, name, intno,
2860 env->pc,
2861 env->npc, env->regwptr[6]);
2862 cpu_dump_state(env, logfile, fprintf, 0);
2863 #if 0
2864 {
2865 int i;
2866 uint8_t *ptr;
2868 fprintf(logfile, " code=");
2869 ptr = (uint8_t *)env->pc;
2870 for(i = 0; i < 16; i++) {
2871 fprintf(logfile, " %02x", ldub(ptr + i));
2872 }
2873 fprintf(logfile, "\n");
2874 }
2875 #endif
2876 count++;
2877 }
2878 #endif
2879 #if !defined(CONFIG_USER_ONLY)
2880 if (env->tl >= env->maxtl) {
2881 cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
2882 " Error state", env->exception_index, env->tl, env->maxtl);
2883 return;
2884 }
2885 #endif
2886 if (env->tl < env->maxtl - 1) {
2887 env->tl++;
2888 } else {
2889 env->pstate |= PS_RED;
2890 if (env->tl < env->maxtl)
2891 env->tl++;
2892 }
2893 env->tsptr = &env->ts[env->tl & MAXTL_MASK];
2894 env->tsptr->tstate = ((uint64_t)GET_CCR(env) << 32) |
2895 ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
2896 GET_CWP64(env);
2897 env->tsptr->tpc = env->pc;
2898 env->tsptr->tnpc = env->npc;
2899 env->tsptr->tt = intno;
2900 if (!(env->def->features & CPU_FEATURE_GL)) {
2901 switch (intno) {
2902 case TT_IVEC:
2903 change_pstate(PS_PEF | PS_PRIV | PS_IG);
2904 break;
2905 case TT_TFAULT:
2906 case TT_TMISS:
2907 case TT_DFAULT:
2908 case TT_DMISS:
2909 case TT_DPROT:
2910 change_pstate(PS_PEF | PS_PRIV | PS_MG);
2911 break;
2912 default:
2913 change_pstate(PS_PEF | PS_PRIV | PS_AG);
2914 break;
2915 }
2916 }
2917 if (intno == TT_CLRWIN)
2918 cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1));
2919 else if ((intno & 0x1c0) == TT_SPILL)
2920 cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2));
2921 else if ((intno & 0x1c0) == TT_FILL)
2922 cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1));
2923 env->tbr &= ~0x7fffULL;
2924 env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
2925 env->pc = env->tbr;
2926 env->npc = env->pc + 4;
2927 env->exception_index = 0;
2928 }
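/* Note (editorial addition, not in the original source): the vector address
   built above follows the V9 trap table layout: the trap base address sits
   in the upper bits of TBR, bit 14 selects the second half of the table for
   traps taken at TL > 0, and each entry is 32 bytes, hence intno << 5.  For
   example, trap type 0x24 taken at TL = 0 vectors to TBA + (0x24 << 5) =
   TBA + 0x480. */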
2929 #else
2930 #ifdef DEBUG_PCALL
2931 static const char * const excp_names[0x80] = {
2932 [TT_TFAULT] = "Instruction Access Fault",
2933 [TT_ILL_INSN] = "Illegal Instruction",
2934 [TT_PRIV_INSN] = "Privileged Instruction",
2935 [TT_NFPU_INSN] = "FPU Disabled",
2936 [TT_WIN_OVF] = "Window Overflow",
2937 [TT_WIN_UNF] = "Window Underflow",
2938 [TT_UNALIGNED] = "Unaligned Memory Access",
2939 [TT_FP_EXCP] = "FPU Exception",
2940 [TT_DFAULT] = "Data Access Fault",
2941 [TT_TOVF] = "Tag Overflow",
2942 [TT_EXTINT | 0x1] = "External Interrupt 1",
2943 [TT_EXTINT | 0x2] = "External Interrupt 2",
2944 [TT_EXTINT | 0x3] = "External Interrupt 3",
2945 [TT_EXTINT | 0x4] = "External Interrupt 4",
2946 [TT_EXTINT | 0x5] = "External Interrupt 5",
2947 [TT_EXTINT | 0x6] = "External Interrupt 6",
2948 [TT_EXTINT | 0x7] = "External Interrupt 7",
2949 [TT_EXTINT | 0x8] = "External Interrupt 8",
2950 [TT_EXTINT | 0x9] = "External Interrupt 9",
2951 [TT_EXTINT | 0xa] = "External Interrupt 10",
2952 [TT_EXTINT | 0xb] = "External Interrupt 11",
2953 [TT_EXTINT | 0xc] = "External Interrupt 12",
2954 [TT_EXTINT | 0xd] = "External Interrupt 13",
2955 [TT_EXTINT | 0xe] = "External Interrupt 14",
2956 [TT_EXTINT | 0xf] = "External Interrupt 15",
2958 [TT_CODE_ACCESS] = "Instruction Access Error",
2959 [TT_DATA_ACCESS] = "Data Access Error",
2960 [TT_DIV_ZERO] = "Division By Zero",
2961 [TT_NCP_INSN] = "Coprocessor Disabled",
2962 };
2963 #endif
2965 void do_interrupt(CPUState *env)
2966 {
2967 int cwp, intno = env->exception_index;
2969 #ifdef DEBUG_PCALL
2970 if (loglevel & CPU_LOG_INT) {
2971 static int count;
2972 const char *name;
2974 if (intno < 0 || intno >= 0x100)
2975 name = "Unknown";
2976 else if (intno >= 0x80)
2977 name = "Trap Instruction";
2978 else {
2979 name = excp_names[intno];
2980 if (!name)
2981 name = "Unknown";
2982 }
2984 fprintf(logfile, "%6d: %s (v=%02x) pc=%08x npc=%08x SP=%08x\n",
2985 count, name, intno,
2986 env->pc,
2987 env->npc, env->regwptr[6]);
2988 cpu_dump_state(env, logfile, fprintf, 0);
2989 #if 0
2990 {
2991 int i;
2992 uint8_t *ptr;
2994 fprintf(logfile, " code=");
2995 ptr = (uint8_t *)env->pc;
2996 for(i = 0; i < 16; i++) {
2997 fprintf(logfile, " %02x", ldub(ptr + i));
2998 }
2999 fprintf(logfile, "\n");
3000 }
3001 #endif
3002 count++;
3003 }
3004 #endif
3005 #if !defined(CONFIG_USER_ONLY)
3006 if (env->psret == 0) {
3007 cpu_abort(env, "Trap 0x%02x while interrupts disabled, Error state",
3008 env->exception_index);
3009 return;
3010 }
3011 #endif
3012 env->psret = 0;
3013 cwp = cpu_cwp_dec(env, env->cwp - 1);
3014 cpu_set_cwp(env, cwp);
3015 env->regwptr[9] = env->pc;
3016 env->regwptr[10] = env->npc;
3017 env->psrps = env->psrs;
3018 env->psrs = 1;
3019 env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
3020 env->pc = env->tbr;
3021 env->npc = env->pc + 4;
3022 env->exception_index = 0;
3023 }
3024 #endif
3026 #if !defined(CONFIG_USER_ONLY)
3028 static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
3029 void *retaddr);
3031 #define MMUSUFFIX _mmu
3032 #define ALIGNED_ONLY
3034 #define SHIFT 0
3035 #include "softmmu_template.h"
3037 #define SHIFT 1
3038 #include "softmmu_template.h"
3040 #define SHIFT 2
3041 #include "softmmu_template.h"
3043 #define SHIFT 3
3044 #include "softmmu_template.h"
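/* Note (editorial addition, not in the original source): each inclusion of
   softmmu_template.h instantiates the slow-path load/store helpers for one
   access size selected by SHIFT (1 << SHIFT bytes), so SHIFT 0..3 produce
   the byte, halfword, word and doubleword variants.  Defining ALIGNED_ONLY
   makes the generated helpers call do_unaligned_access() below for
   misaligned addresses instead of accepting them silently. */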
3046 /* XXX: make it generic ? */
3047 static void cpu_restore_state2(void *retaddr)
3048 {
3049 TranslationBlock *tb;
3050 unsigned long pc;
3052 if (retaddr) {
3053 /* now we have a real cpu fault */
3054 pc = (unsigned long)retaddr;
3055 tb = tb_find_pc(pc);
3056 if (tb) {
3057 /* the PC is inside the translated code. It means that we have
3058 a virtual CPU fault */
3059 cpu_restore_state(tb, env, pc, (void *)(long)env->cond);
3060 }
3061 }
3062 }
3064 static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
3065 void *retaddr)
3066 {
3067 #ifdef DEBUG_UNALIGNED
3068 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
3069 "\n", addr, env->pc);
3070 #endif
3071 cpu_restore_state2(retaddr);
3072 raise_exception(TT_UNALIGNED);
3073 }
3075 /* try to fill the TLB and return an exception if error. If retaddr is
3076 NULL, it means that the function was called in C code (i.e. not
3077 from generated code or from helper.c) */
3078 /* XXX: fix it to restore all registers */
3079 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
3080 {
3081 int ret;
3082 CPUState *saved_env;
3084 /* XXX: hack to restore env in all cases, even if not called from
3085 generated code */
3086 saved_env = env;
3087 env = cpu_single_env;
3089 ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3090 if (ret) {
3091 cpu_restore_state2(retaddr);
3092 cpu_loop_exit();
3093 }
3094 env = saved_env;
3095 }
3097 #endif
3099 #ifndef TARGET_SPARC64
3100 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
3101 int is_asi)
3102 {
3103 CPUState *saved_env;
3105 /* XXX: hack to restore env in all cases, even if not called from
3106 generated code */
3107 saved_env = env;
3108 env = cpu_single_env;
3109 #ifdef DEBUG_UNASSIGNED
3110 if (is_asi)
3111 printf("Unassigned mem %s access to " TARGET_FMT_plx
3112 " asi 0x%02x from " TARGET_FMT_lx "\n",
3113 is_exec ? "exec" : is_write ? "write" : "read", addr, is_asi,
3114 env->pc);
3115 else
3116 printf("Unassigned mem %s access to " TARGET_FMT_plx " from "
3117 TARGET_FMT_lx "\n",
3118 is_exec ? "exec" : is_write ? "write" : "read", addr, env->pc);
3119 #endif
3120 if (env->mmuregs[3]) /* Fault status register */
3121 env->mmuregs[3] = 1; /* overflow (not read before another fault) */
3122 if (is_asi)
3123 env->mmuregs[3] |= 1 << 16;
3124 if (env->psrs)
3125 env->mmuregs[3] |= 1 << 5;
3126 if (is_exec)
3127 env->mmuregs[3] |= 1 << 6;
3128 if (is_write)
3129 env->mmuregs[3] |= 1 << 7;
3130 env->mmuregs[3] |= (5 << 2) | 2;
3131 env->mmuregs[4] = addr; /* Fault address register */
3132 if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
3133 if (is_exec)
3134 raise_exception(TT_CODE_ACCESS);
3135 else
3136 raise_exception(TT_DATA_ACCESS);
3137 }
3138 env = saved_env;
3139 }
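/* Note (editorial addition, not in the original source): the code above fakes
   a SPARC reference-MMU style fault report for accesses to unassigned
   memory: mmuregs[3] acts as the fault status register (the supervisor,
   exec and write bits describe the access, "(5 << 2) | 2" encodes a
   bus-error fault type with a valid fault address, and the initial "= 1"
   models the overflow bit when an earlier fault was never read out), while
   mmuregs[4] holds the fault address.  The access only raises
   TT_CODE_ACCESS/TT_DATA_ACCESS when the MMU is enabled and not in no-fault
   mode (MMU_E set, MMU_NF clear). */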
3140 #else
3141 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
3142 int is_asi)
3143 {
3144 #ifdef DEBUG_UNASSIGNED
3145 CPUState *saved_env;
3147 /* XXX: hack to restore env in all cases, even if not called from
3148 generated code */
3149 saved_env = env;
3150 env = cpu_single_env;
3151 printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
3152 "\n", addr, env->pc);
3153 env = saved_env;
3154 #endif
3155 if (is_exec)
3156 raise_exception(TT_CODE_ACCESS);
3157 else
3158 raise_exception(TT_DATA_ACCESS);
3159 }
3160 #endif