[qemu-kvm/fedora.git] / target-sparc / op_helper.c
1 #include "exec.h"
2 #include "host-utils.h"
3 #include "helper.h"
4 #if !defined(CONFIG_USER_ONLY)
5 #include "softmmu_exec.h"
6 #endif /* !defined(CONFIG_USER_ONLY) */
8 //#define DEBUG_MMU
9 //#define DEBUG_MXCC
10 //#define DEBUG_UNALIGNED
11 //#define DEBUG_UNASSIGNED
12 //#define DEBUG_ASI
14 #ifdef DEBUG_MMU
15 #define DPRINTF_MMU(fmt, args...) \
16 do { printf("MMU: " fmt , ##args); } while (0)
17 #else
18 #define DPRINTF_MMU(fmt, args...) do {} while (0)
19 #endif
21 #ifdef DEBUG_MXCC
22 #define DPRINTF_MXCC(fmt, args...) \
23 do { printf("MXCC: " fmt , ##args); } while (0)
24 #else
25 #define DPRINTF_MXCC(fmt, args...) do {} while (0)
26 #endif
28 #ifdef DEBUG_ASI
29 #define DPRINTF_ASI(fmt, args...) \
30 do { printf("ASI: " fmt , ##args); } while (0)
31 #else
32 #define DPRINTF_ASI(fmt, args...) do {} while (0)
33 #endif
35 #ifdef TARGET_ABI32
36 #define ABI32_MASK(addr) do { (addr) &= 0xffffffffULL; } while (0)
37 #else
38 #define ABI32_MASK(addr) do {} while (0)
39 #endif
41 void raise_exception(int tt)
43 env->exception_index = tt;
44 cpu_loop_exit();
47 void helper_trap(target_ulong nb_trap)
49 env->exception_index = TT_TRAP + (nb_trap & 0x7f);
50 cpu_loop_exit();
53 void helper_trapcc(target_ulong nb_trap, target_ulong do_trap)
55 if (do_trap) {
56 env->exception_index = TT_TRAP + (nb_trap & 0x7f);
57 cpu_loop_exit();
61 void helper_check_align(target_ulong addr, uint32_t align)
63 if (addr & align) {
64 #ifdef DEBUG_UNALIGNED
65 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
66 "\n", addr, env->pc);
67 #endif
68 raise_exception(TT_UNALIGNED);
72 #define F_HELPER(name, p) void helper_f##name##p(void)
74 #define F_BINOP(name) \
75 F_HELPER(name, s) \
76 { \
77 FT0 = float32_ ## name (FT0, FT1, &env->fp_status); \
78 } \
79 F_HELPER(name, d) \
80 { \
81 DT0 = float64_ ## name (DT0, DT1, &env->fp_status); \
82 } \
83 F_HELPER(name, q) \
84 { \
85 QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \
88 F_BINOP(add);
89 F_BINOP(sub);
90 F_BINOP(mul);
91 F_BINOP(div);
92 #undef F_BINOP
94 void helper_fsmuld(void)
96 DT0 = float64_mul(float32_to_float64(FT0, &env->fp_status),
97 float32_to_float64(FT1, &env->fp_status),
98 &env->fp_status);
101 void helper_fdmulq(void)
103 QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
104 float64_to_float128(DT1, &env->fp_status),
105 &env->fp_status);
108 F_HELPER(neg, s)
110 FT0 = float32_chs(FT1);
113 #ifdef TARGET_SPARC64
114 F_HELPER(neg, d)
116 DT0 = float64_chs(DT1);
119 F_HELPER(neg, q)
121 QT0 = float128_chs(QT1);
123 #endif
125 /* Integer to float conversion. */
126 F_HELPER(ito, s)
128 FT0 = int32_to_float32(*((int32_t *)&FT1), &env->fp_status);
131 F_HELPER(ito, d)
133 DT0 = int32_to_float64(*((int32_t *)&FT1), &env->fp_status);
136 F_HELPER(ito, q)
138 QT0 = int32_to_float128(*((int32_t *)&FT1), &env->fp_status);
141 #ifdef TARGET_SPARC64
142 F_HELPER(xto, s)
144 FT0 = int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
147 F_HELPER(xto, d)
149 DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
152 F_HELPER(xto, q)
154 QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
156 #endif
157 #undef F_HELPER
159 /* floating point conversion */
160 void helper_fdtos(void)
162 FT0 = float64_to_float32(DT1, &env->fp_status);
165 void helper_fstod(void)
167 DT0 = float32_to_float64(FT1, &env->fp_status);
170 void helper_fqtos(void)
172 FT0 = float128_to_float32(QT1, &env->fp_status);
175 void helper_fstoq(void)
177 QT0 = float32_to_float128(FT1, &env->fp_status);
180 void helper_fqtod(void)
182 DT0 = float128_to_float64(QT1, &env->fp_status);
185 void helper_fdtoq(void)
187 QT0 = float64_to_float128(DT1, &env->fp_status);
190 /* Float to integer conversion. */
191 void helper_fstoi(void)
193 *((int32_t *)&FT0) = float32_to_int32_round_to_zero(FT1, &env->fp_status);
196 void helper_fdtoi(void)
198 *((int32_t *)&FT0) = float64_to_int32_round_to_zero(DT1, &env->fp_status);
201 void helper_fqtoi(void)
203 *((int32_t *)&FT0) = float128_to_int32_round_to_zero(QT1, &env->fp_status);
206 #ifdef TARGET_SPARC64
207 void helper_fstox(void)
209 *((int64_t *)&DT0) = float32_to_int64_round_to_zero(FT1, &env->fp_status);
212 void helper_fdtox(void)
214 *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
217 void helper_fqtox(void)
219 *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
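/* VIS faligndata: concatenate DT0:DT1 and extract the 8 bytes starting at
   the byte offset held in GSR.align (low three bits of %gsr), as set up by
   a preceding alignaddr.  Note that for an offset of 0 the right shift by
   64 below is undefined behaviour in C; the result is only well defined
   for offsets 1-7. */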
222 void helper_faligndata(void)
224 uint64_t tmp;
226 tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
227 tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
228 *((uint64_t *)&DT0) = tmp;
231 void helper_movl_FT0_0(void)
233 *((uint32_t *)&FT0) = 0;
236 void helper_movl_DT0_0(void)
238 *((uint64_t *)&DT0) = 0;
241 void helper_movl_FT0_1(void)
243 *((uint32_t *)&FT0) = 0xffffffff;
246 void helper_movl_DT0_1(void)
248 *((uint64_t *)&DT0) = 0xffffffffffffffffULL;
251 void helper_fnot(void)
253 *(uint64_t *)&DT0 = ~*(uint64_t *)&DT1;
256 void helper_fnots(void)
258 *(uint32_t *)&FT0 = ~*(uint32_t *)&FT1;
261 void helper_fnor(void)
263 *(uint64_t *)&DT0 = ~(*(uint64_t *)&DT0 | *(uint64_t *)&DT1);
266 void helper_fnors(void)
268 *(uint32_t *)&FT0 = ~(*(uint32_t *)&FT0 | *(uint32_t *)&FT1);
271 void helper_for(void)
273 *(uint64_t *)&DT0 |= *(uint64_t *)&DT1;
276 void helper_fors(void)
278 *(uint32_t *)&FT0 |= *(uint32_t *)&FT1;
281 void helper_fxor(void)
283 *(uint64_t *)&DT0 ^= *(uint64_t *)&DT1;
286 void helper_fxors(void)
288 *(uint32_t *)&FT0 ^= *(uint32_t *)&FT1;
291 void helper_fand(void)
293 *(uint64_t *)&DT0 &= *(uint64_t *)&DT1;
296 void helper_fands(void)
298 *(uint32_t *)&FT0 &= *(uint32_t *)&FT1;
301 void helper_fornot(void)
303 *(uint64_t *)&DT0 = *(uint64_t *)&DT0 | ~*(uint64_t *)&DT1;
306 void helper_fornots(void)
308 *(uint32_t *)&FT0 = *(uint32_t *)&FT0 | ~*(uint32_t *)&FT1;
311 void helper_fandnot(void)
313 *(uint64_t *)&DT0 = *(uint64_t *)&DT0 & ~*(uint64_t *)&DT1;
316 void helper_fandnots(void)
318 *(uint32_t *)&FT0 = *(uint32_t *)&FT0 & ~*(uint32_t *)&FT1;
321 void helper_fnand(void)
323 *(uint64_t *)&DT0 = ~(*(uint64_t *)&DT0 & *(uint64_t *)&DT1);
326 void helper_fnands(void)
328 *(uint32_t *)&FT0 = ~(*(uint32_t *)&FT0 & *(uint32_t *)&FT1);
331 void helper_fxnor(void)
333 *(uint64_t *)&DT0 ^= ~*(uint64_t *)&DT1;
336 void helper_fxnors(void)
338 *(uint32_t *)&FT0 ^= ~*(uint32_t *)&FT1;
341 #ifdef WORDS_BIGENDIAN
342 #define VIS_B64(n) b[7 - (n)]
343 #define VIS_W64(n) w[3 - (n)]
344 #define VIS_SW64(n) sw[3 - (n)]
345 #define VIS_L64(n) l[1 - (n)]
346 #define VIS_B32(n) b[3 - (n)]
347 #define VIS_W32(n) w[1 - (n)]
348 #else
349 #define VIS_B64(n) b[n]
350 #define VIS_W64(n) w[n]
351 #define VIS_SW64(n) sw[n]
352 #define VIS_L64(n) l[n]
353 #define VIS_B32(n) b[n]
354 #define VIS_W32(n) w[n]
355 #endif
357 typedef union {
358 uint8_t b[8];
359 uint16_t w[4];
360 int16_t sw[4];
361 uint32_t l[2];
362 float64 d;
363 } vis64;
365 typedef union {
366 uint8_t b[4];
367 uint16_t w[2];
368 uint32_t l;
369 float32 f;
370 } vis32;
372 void helper_fpmerge(void)
374 vis64 s, d;
376 s.d = DT0;
377 d.d = DT1;
379 // Reverse calculation order to handle overlap
380 d.VIS_B64(7) = s.VIS_B64(3);
381 d.VIS_B64(6) = d.VIS_B64(3);
382 d.VIS_B64(5) = s.VIS_B64(2);
383 d.VIS_B64(4) = d.VIS_B64(2);
384 d.VIS_B64(3) = s.VIS_B64(1);
385 d.VIS_B64(2) = d.VIS_B64(1);
386 d.VIS_B64(1) = s.VIS_B64(0);
387 //d.VIS_B64(0) = d.VIS_B64(0);
389 DT0 = d.d;
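/* VIS partitioned multiplies: each signed 16-bit element of one operand is
   multiplied by an unsigned 8-bit pixel element (or the upper/lower byte of
   a 16-bit element) of the other.  The PMUL macros keep the upper 16 bits
   of the 24-bit product, rounding up when the discarded low byte exceeds
   0x7f. */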
392 void helper_fmul8x16(void)
394 vis64 s, d;
395 uint32_t tmp;
397 s.d = DT0;
398 d.d = DT1;
400 #define PMUL(r) \
401 tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \
402 if ((tmp & 0xff) > 0x7f) \
403 tmp += 0x100; \
404 d.VIS_W64(r) = tmp >> 8;
406 PMUL(0);
407 PMUL(1);
408 PMUL(2);
409 PMUL(3);
410 #undef PMUL
412 DT0 = d.d;
415 void helper_fmul8x16al(void)
417 vis64 s, d;
418 uint32_t tmp;
420 s.d = DT0;
421 d.d = DT1;
423 #define PMUL(r) \
424 tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \
425 if ((tmp & 0xff) > 0x7f) \
426 tmp += 0x100; \
427 d.VIS_W64(r) = tmp >> 8;
429 PMUL(0);
430 PMUL(1);
431 PMUL(2);
432 PMUL(3);
433 #undef PMUL
435 DT0 = d.d;
438 void helper_fmul8x16au(void)
440 vis64 s, d;
441 uint32_t tmp;
443 s.d = DT0;
444 d.d = DT1;
446 #define PMUL(r) \
447 tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \
448 if ((tmp & 0xff) > 0x7f) \
449 tmp += 0x100; \
450 d.VIS_W64(r) = tmp >> 8;
452 PMUL(0);
453 PMUL(1);
454 PMUL(2);
455 PMUL(3);
456 #undef PMUL
458 DT0 = d.d;
461 void helper_fmul8sux16(void)
463 vis64 s, d;
464 uint32_t tmp;
466 s.d = DT0;
467 d.d = DT1;
469 #define PMUL(r) \
470 tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
471 if ((tmp & 0xff) > 0x7f) \
472 tmp += 0x100; \
473 d.VIS_W64(r) = tmp >> 8;
475 PMUL(0);
476 PMUL(1);
477 PMUL(2);
478 PMUL(3);
479 #undef PMUL
481 DT0 = d.d;
484 void helper_fmul8ulx16(void)
486 vis64 s, d;
487 uint32_t tmp;
489 s.d = DT0;
490 d.d = DT1;
492 #define PMUL(r) \
493 tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
494 if ((tmp & 0xff) > 0x7f) \
495 tmp += 0x100; \
496 d.VIS_W64(r) = tmp >> 8;
498 PMUL(0);
499 PMUL(1);
500 PMUL(2);
501 PMUL(3);
502 #undef PMUL
504 DT0 = d.d;
507 void helper_fmuld8sux16(void)
509 vis64 s, d;
510 uint32_t tmp;
512 s.d = DT0;
513 d.d = DT1;
515 #define PMUL(r) \
516 tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
517 if ((tmp & 0xff) > 0x7f) \
518 tmp += 0x100; \
519 d.VIS_L64(r) = tmp;
521 // Reverse calculation order to handle overlap
522 PMUL(1);
523 PMUL(0);
524 #undef PMUL
526 DT0 = d.d;
529 void helper_fmuld8ulx16(void)
531 vis64 s, d;
532 uint32_t tmp;
534 s.d = DT0;
535 d.d = DT1;
537 #define PMUL(r) \
538 tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
539 if ((tmp & 0xff) > 0x7f) \
540 tmp += 0x100; \
541 d.VIS_L64(r) = tmp;
543 // Reverse calculation order to handle overlap
544 PMUL(1);
545 PMUL(0);
546 #undef PMUL
548 DT0 = d.d;
551 void helper_fexpand(void)
553 vis32 s;
554 vis64 d;
556 s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
557 d.d = DT1;
 558 d.VIS_W64(0) = s.VIS_B32(0) << 4;
 559 d.VIS_W64(1) = s.VIS_B32(1) << 4;
 560 d.VIS_W64(2) = s.VIS_B32(2) << 4;
 561 d.VIS_W64(3) = s.VIS_B32(3) << 4;
563 DT0 = d.d;
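/* Partitioned add/subtract: VIS_HELPER expands into the fpadd/fpsub
   variants for 16-bit and 32-bit lanes of 64-bit and 32-bit operands.
   Arithmetic is modular; there is no saturation. */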
566 #define VIS_HELPER(name, F) \
567 void name##16(void) \
569 vis64 s, d; \
571 s.d = DT0; \
572 d.d = DT1; \
574 d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
575 d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
576 d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
577 d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
579 DT0 = d.d; \
582 void name##16s(void) \
584 vis32 s, d; \
586 s.f = FT0; \
587 d.f = FT1; \
589 d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
590 d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
592 FT0 = d.f; \
595 void name##32(void) \
597 vis64 s, d; \
599 s.d = DT0; \
600 d.d = DT1; \
602 d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
603 d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
605 DT0 = d.d; \
608 void name##32s(void) \
610 vis32 s, d; \
612 s.f = FT0; \
613 d.f = FT1; \
615 d.l = F(d.l, s.l); \
617 FT0 = d.f; \
620 #define FADD(a, b) ((a) + (b))
621 #define FSUB(a, b) ((a) - (b))
622 VIS_HELPER(helper_fpadd, FADD)
623 VIS_HELPER(helper_fpsub, FSUB)
625 #define VIS_CMPHELPER(name, F) \
626 void name##16(void) \
628 vis64 s, d; \
630 s.d = DT0; \
631 d.d = DT1; \
633 d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0; \
634 d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0; \
635 d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0; \
636 d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0; \
638 DT0 = d.d; \
641 void name##32(void) \
643 vis64 s, d; \
645 s.d = DT0; \
646 d.d = DT1; \
648 d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0; \
649 d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0; \
651 DT0 = d.d; \
654 #define FCMPGT(a, b) ((a) > (b))
655 #define FCMPEQ(a, b) ((a) == (b))
656 #define FCMPLE(a, b) ((a) <= (b))
657 #define FCMPNE(a, b) ((a) != (b))
659 VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
660 VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
661 VIS_CMPHELPER(helper_fcmple, FCMPLE)
662 VIS_CMPHELPER(helper_fcmpne, FCMPNE)
663 #endif
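/* Fold the accumulated softfloat exception flags into the FSR current
   exception (cexc) field.  If any of the corresponding trap enable mask
   (TEM) bits are set, an fp_exception trap with FTT = IEEE_754_exception
   is raised; otherwise the bits are added to the accrued (aexc) field. */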
665 void helper_check_ieee_exceptions(void)
667 target_ulong status;
669 status = get_float_exception_flags(&env->fp_status);
670 if (status) {
671 /* Copy IEEE 754 flags into FSR */
672 if (status & float_flag_invalid)
673 env->fsr |= FSR_NVC;
674 if (status & float_flag_overflow)
675 env->fsr |= FSR_OFC;
676 if (status & float_flag_underflow)
677 env->fsr |= FSR_UFC;
678 if (status & float_flag_divbyzero)
679 env->fsr |= FSR_DZC;
680 if (status & float_flag_inexact)
681 env->fsr |= FSR_NXC;
683 if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
684 /* Unmasked exception, generate a trap */
685 env->fsr |= FSR_FTT_IEEE_EXCP;
686 raise_exception(TT_FP_EXCP);
687 } else {
688 /* Accumulate exceptions */
689 env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
694 void helper_clear_float_exceptions(void)
696 set_float_exception_flags(0, &env->fp_status);
699 void helper_fabss(void)
701 FT0 = float32_abs(FT1);
704 #ifdef TARGET_SPARC64
705 void helper_fabsd(void)
707 DT0 = float64_abs(DT1);
710 void helper_fabsq(void)
712 QT0 = float128_abs(QT1);
714 #endif
716 void helper_fsqrts(void)
718 FT0 = float32_sqrt(FT1, &env->fp_status);
721 void helper_fsqrtd(void)
723 DT0 = float64_sqrt(DT1, &env->fp_status);
726 void helper_fsqrtq(void)
728 QT0 = float128_sqrt(QT1, &env->fp_status);
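/* GEN_FCMP instantiates one comparison helper per operand size and
   condition-code field.  FS is the bit offset of the target fcc field in
   the FSR (0 for fcc0, 22/24/26 for fcc1-fcc3) and TRAP selects the fcmpe
   behaviour of trapping on unordered operands even when the NV trap is
   masked. */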
731 #define GEN_FCMP(name, size, reg1, reg2, FS, TRAP) \
732 void glue(helper_, name) (void) \
734 target_ulong new_fsr; \
736 env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
737 switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) { \
738 case float_relation_unordered: \
739 new_fsr = (FSR_FCC1 | FSR_FCC0) << FS; \
740 if ((env->fsr & FSR_NVM) || TRAP) { \
741 env->fsr |= new_fsr; \
742 env->fsr |= FSR_NVC; \
743 env->fsr |= FSR_FTT_IEEE_EXCP; \
744 raise_exception(TT_FP_EXCP); \
745 } else { \
746 env->fsr |= FSR_NVA; \
748 break; \
749 case float_relation_less: \
750 new_fsr = FSR_FCC0 << FS; \
751 break; \
752 case float_relation_greater: \
753 new_fsr = FSR_FCC1 << FS; \
754 break; \
755 default: \
756 new_fsr = 0; \
757 break; \
759 env->fsr |= new_fsr; \
762 GEN_FCMP(fcmps, float32, FT0, FT1, 0, 0);
763 GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);
765 GEN_FCMP(fcmpes, float32, FT0, FT1, 0, 1);
766 GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);
768 GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
769 GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
771 #ifdef TARGET_SPARC64
772 GEN_FCMP(fcmps_fcc1, float32, FT0, FT1, 22, 0);
773 GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
774 GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
776 GEN_FCMP(fcmps_fcc2, float32, FT0, FT1, 24, 0);
777 GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
778 GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
780 GEN_FCMP(fcmps_fcc3, float32, FT0, FT1, 26, 0);
781 GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
782 GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
784 GEN_FCMP(fcmpes_fcc1, float32, FT0, FT1, 22, 1);
785 GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
786 GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
788 GEN_FCMP(fcmpes_fcc2, float32, FT0, FT1, 24, 1);
789 GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
790 GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
792 GEN_FCMP(fcmpes_fcc3, float32, FT0, FT1, 26, 1);
793 GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
794 GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
795 #endif
797 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
798 defined(DEBUG_MXCC)
799 static void dump_mxcc(CPUState *env)
801 printf("mxccdata: %016llx %016llx %016llx %016llx\n",
802 env->mxccdata[0], env->mxccdata[1],
803 env->mxccdata[2], env->mxccdata[3]);
804 printf("mxccregs: %016llx %016llx %016llx %016llx\n"
805 " %016llx %016llx %016llx %016llx\n",
806 env->mxccregs[0], env->mxccregs[1],
807 env->mxccregs[2], env->mxccregs[3],
808 env->mxccregs[4], env->mxccregs[5],
809 env->mxccregs[6], env->mxccregs[7]);
811 #endif
813 #if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
814 && defined(DEBUG_ASI)
815 static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
816 uint64_t r1)
818 switch (size)
820 case 1:
821 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
822 addr, asi, r1 & 0xff);
823 break;
824 case 2:
825 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
826 addr, asi, r1 & 0xffff);
827 break;
828 case 4:
829 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
830 addr, asi, r1 & 0xffffffff);
831 break;
832 case 8:
833 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
834 addr, asi, r1);
835 break;
838 #endif
840 #ifndef TARGET_SPARC64
841 #ifndef CONFIG_USER_ONLY
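/* SPARC32 alternate-space loads.  The ASI number selects the address
   space: SuperSPARC MXCC registers (2), MMU probe (3), MMU registers (4),
   user/supervisor code and data (8-0xb), cache diagnostics (0xc-0xf) and
   physical pass-through accesses that bypass the MMU (0x20-0x2f). */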
842 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
844 uint64_t ret = 0;
845 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
846 uint32_t last_addr = addr;
847 #endif
849 helper_check_align(addr, size - 1);
850 switch (asi) {
851 case 2: /* SuperSparc MXCC registers */
852 switch (addr) {
853 case 0x01c00a00: /* MXCC control register */
854 if (size == 8)
855 ret = env->mxccregs[3];
856 else
857 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
858 size);
859 break;
860 case 0x01c00a04: /* MXCC control register */
861 if (size == 4)
862 ret = env->mxccregs[3];
863 else
864 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
865 size);
866 break;
867 case 0x01c00c00: /* Module reset register */
868 if (size == 8) {
869 ret = env->mxccregs[5];
870 // should we do something here?
871 } else
872 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
873 size);
874 break;
875 case 0x01c00f00: /* MBus port address register */
876 if (size == 8)
877 ret = env->mxccregs[7];
878 else
879 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
880 size);
881 break;
882 default:
883 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
884 size);
885 break;
887 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
888 "addr = %08x -> ret = %08x,"
889 "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
890 #ifdef DEBUG_MXCC
891 dump_mxcc(env);
892 #endif
893 break;
894 case 3: /* MMU probe */
896 int mmulev;
898 mmulev = (addr >> 8) & 15;
899 if (mmulev > 4)
900 ret = 0;
901 else
902 ret = mmu_probe(env, addr, mmulev);
903 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
904 addr, mmulev, ret);
906 break;
907 case 4: /* read MMU regs */
909 int reg = (addr >> 8) & 0x1f;
911 ret = env->mmuregs[reg];
912 if (reg == 3) /* Fault status cleared on read */
913 env->mmuregs[3] = 0;
914 else if (reg == 0x13) /* Fault status read */
915 ret = env->mmuregs[3];
916 else if (reg == 0x14) /* Fault address read */
917 ret = env->mmuregs[4];
918 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
920 break;
921 case 5: // Turbosparc ITLB Diagnostic
922 case 6: // Turbosparc DTLB Diagnostic
923 case 7: // Turbosparc IOTLB Diagnostic
924 break;
925 case 9: /* Supervisor code access */
926 switch(size) {
927 case 1:
928 ret = ldub_code(addr);
929 break;
930 case 2:
931 ret = lduw_code(addr);
932 break;
933 default:
934 case 4:
935 ret = ldl_code(addr);
936 break;
937 case 8:
938 ret = ldq_code(addr);
939 break;
941 break;
942 case 0xa: /* User data access */
943 switch(size) {
944 case 1:
945 ret = ldub_user(addr);
946 break;
947 case 2:
948 ret = lduw_user(addr);
949 break;
950 default:
951 case 4:
952 ret = ldl_user(addr);
953 break;
954 case 8:
955 ret = ldq_user(addr);
956 break;
958 break;
959 case 0xb: /* Supervisor data access */
960 switch(size) {
961 case 1:
962 ret = ldub_kernel(addr);
963 break;
964 case 2:
965 ret = lduw_kernel(addr);
966 break;
967 default:
968 case 4:
969 ret = ldl_kernel(addr);
970 break;
971 case 8:
972 ret = ldq_kernel(addr);
973 break;
975 break;
976 case 0xc: /* I-cache tag */
977 case 0xd: /* I-cache data */
978 case 0xe: /* D-cache tag */
979 case 0xf: /* D-cache data */
980 break;
981 case 0x20: /* MMU passthrough */
982 switch(size) {
983 case 1:
984 ret = ldub_phys(addr);
985 break;
986 case 2:
987 ret = lduw_phys(addr);
988 break;
989 default:
990 case 4:
991 ret = ldl_phys(addr);
992 break;
993 case 8:
994 ret = ldq_phys(addr);
995 break;
997 break;
998 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
999 switch(size) {
1000 case 1:
1001 ret = ldub_phys((target_phys_addr_t)addr
1002 | ((target_phys_addr_t)(asi & 0xf) << 32));
1003 break;
1004 case 2:
1005 ret = lduw_phys((target_phys_addr_t)addr
1006 | ((target_phys_addr_t)(asi & 0xf) << 32));
1007 break;
1008 default:
1009 case 4:
1010 ret = ldl_phys((target_phys_addr_t)addr
1011 | ((target_phys_addr_t)(asi & 0xf) << 32));
1012 break;
1013 case 8:
1014 ret = ldq_phys((target_phys_addr_t)addr
1015 | ((target_phys_addr_t)(asi & 0xf) << 32));
1016 break;
1018 break;
1019 case 0x30: // Turbosparc secondary cache diagnostic
1020 case 0x31: // Turbosparc RAM snoop
1021 case 0x32: // Turbosparc page table descriptor diagnostic
1022 case 0x39: /* data cache diagnostic register */
1023 ret = 0;
1024 break;
1025 case 8: /* User code access, XXX */
1026 default:
1027 do_unassigned_access(addr, 0, 0, asi);
1028 ret = 0;
1029 break;
1031 if (sign) {
1032 switch(size) {
1033 case 1:
1034 ret = (int8_t) ret;
1035 break;
1036 case 2:
1037 ret = (int16_t) ret;
1038 break;
1039 case 4:
1040 ret = (int32_t) ret;
1041 break;
1042 default:
1043 break;
1046 #ifdef DEBUG_ASI
1047 dump_asi("read ", last_addr, asi, size, ret);
1048 #endif
1049 return ret;
1052 void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
1054 helper_check_align(addr, size - 1);
1055 switch(asi) {
1056 case 2: /* SuperSparc MXCC registers */
1057 switch (addr) {
1058 case 0x01c00000: /* MXCC stream data register 0 */
1059 if (size == 8)
1060 env->mxccdata[0] = val;
1061 else
1062 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1063 size);
1064 break;
1065 case 0x01c00008: /* MXCC stream data register 1 */
1066 if (size == 8)
1067 env->mxccdata[1] = val;
1068 else
1069 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1070 size);
1071 break;
1072 case 0x01c00010: /* MXCC stream data register 2 */
1073 if (size == 8)
1074 env->mxccdata[2] = val;
1075 else
1076 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1077 size);
1078 break;
1079 case 0x01c00018: /* MXCC stream data register 3 */
1080 if (size == 8)
1081 env->mxccdata[3] = val;
1082 else
1083 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1084 size);
1085 break;
1086 case 0x01c00100: /* MXCC stream source */
1087 if (size == 8)
1088 env->mxccregs[0] = val;
1089 else
1090 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1091 size);
1092 env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) + 0);
1094 env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) + 8);
1096 env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) + 16);
1098 env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) + 24);
1100 break;
1101 case 0x01c00200: /* MXCC stream destination */
1102 if (size == 8)
1103 env->mxccregs[1] = val;
1104 else
1105 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1106 size);
1107 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0,
1108 env->mxccdata[0]);
1109 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8,
1110 env->mxccdata[1]);
1111 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
1112 env->mxccdata[2]);
1113 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
1114 env->mxccdata[3]);
1115 break;
1116 case 0x01c00a00: /* MXCC control register */
1117 if (size == 8)
1118 env->mxccregs[3] = val;
1119 else
1120 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1121 size);
1122 break;
1123 case 0x01c00a04: /* MXCC control register */
1124 if (size == 4)
1125 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
1126 | val;
1127 else
1128 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1129 size);
1130 break;
1131 case 0x01c00e00: /* MXCC error register */
1132 // writing a 1 bit clears the error
1133 if (size == 8)
1134 env->mxccregs[6] &= ~val;
1135 else
1136 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1137 size);
1138 break;
1139 case 0x01c00f00: /* MBus port address register */
1140 if (size == 8)
1141 env->mxccregs[7] = val;
1142 else
1143 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1144 size);
1145 break;
1146 default:
1147 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
1148 size);
1149 break;
1151 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %08x\n", asi,
1152 size, addr, val);
1153 #ifdef DEBUG_MXCC
1154 dump_mxcc(env);
1155 #endif
1156 break;
1157 case 3: /* MMU flush */
1159 int mmulev;
1161 mmulev = (addr >> 8) & 15;
1162 DPRINTF_MMU("mmu flush level %d\n", mmulev);
1163 switch (mmulev) {
1164 case 0: // flush page
1165 tlb_flush_page(env, addr & 0xfffff000);
1166 break;
1167 case 1: // flush segment (256k)
1168 case 2: // flush region (16M)
1169 case 3: // flush context (4G)
1170 case 4: // flush entire
1171 tlb_flush(env, 1);
1172 break;
1173 default:
1174 break;
1176 #ifdef DEBUG_MMU
1177 dump_mmu(env);
1178 #endif
1180 break;
1181 case 4: /* write MMU regs */
1183 int reg = (addr >> 8) & 0x1f;
1184 uint32_t oldreg;
1186 oldreg = env->mmuregs[reg];
1187 switch(reg) {
1188 case 0: // Control Register
1189 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
1190 (val & 0x00ffffff);
1191 // Mappings generated during no-fault mode or MMU
1192 // disabled mode are invalid in normal mode
1193 if ((oldreg & (MMU_E | MMU_NF | env->mmu_bm)) !=
1194 (env->mmuregs[reg] & (MMU_E | MMU_NF | env->mmu_bm)))
1195 tlb_flush(env, 1);
1196 break;
1197 case 1: // Context Table Pointer Register
1198 env->mmuregs[reg] = val & env->mmu_ctpr_mask;
1199 break;
1200 case 2: // Context Register
1201 env->mmuregs[reg] = val & env->mmu_cxr_mask;
1202 if (oldreg != env->mmuregs[reg]) {
1203 /* we flush when the MMU context changes because
1204 QEMU has no MMU context support */
1205 tlb_flush(env, 1);
1207 break;
1208 case 3: // Synchronous Fault Status Register with Clear
1209 case 4: // Synchronous Fault Address Register
1210 break;
1211 case 0x10: // TLB Replacement Control Register
1212 env->mmuregs[reg] = val & env->mmu_trcr_mask;
1213 break;
1214 case 0x13: // Synchronous Fault Status Register with Read and Clear
1215 env->mmuregs[3] = val & env->mmu_sfsr_mask;
1216 break;
1217 case 0x14: // Synchronous Fault Address Register
1218 env->mmuregs[4] = val;
1219 break;
1220 default:
1221 env->mmuregs[reg] = val;
1222 break;
1224 if (oldreg != env->mmuregs[reg]) {
1225 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
1226 reg, oldreg, env->mmuregs[reg]);
1228 #ifdef DEBUG_MMU
1229 dump_mmu(env);
1230 #endif
1232 break;
1233 case 5: // Turbosparc ITLB Diagnostic
1234 case 6: // Turbosparc DTLB Diagnostic
1235 case 7: // Turbosparc IOTLB Diagnostic
1236 break;
1237 case 0xa: /* User data access */
1238 switch(size) {
1239 case 1:
1240 stb_user(addr, val);
1241 break;
1242 case 2:
1243 stw_user(addr, val);
1244 break;
1245 default:
1246 case 4:
1247 stl_user(addr, val);
1248 break;
1249 case 8:
1250 stq_user(addr, val);
1251 break;
1253 break;
1254 case 0xb: /* Supervisor data access */
1255 switch(size) {
1256 case 1:
1257 stb_kernel(addr, val);
1258 break;
1259 case 2:
1260 stw_kernel(addr, val);
1261 break;
1262 default:
1263 case 4:
1264 stl_kernel(addr, val);
1265 break;
1266 case 8:
1267 stq_kernel(addr, val);
1268 break;
1270 break;
1271 case 0xc: /* I-cache tag */
1272 case 0xd: /* I-cache data */
1273 case 0xe: /* D-cache tag */
1274 case 0xf: /* D-cache data */
1275 case 0x10: /* I/D-cache flush page */
1276 case 0x11: /* I/D-cache flush segment */
1277 case 0x12: /* I/D-cache flush region */
1278 case 0x13: /* I/D-cache flush context */
1279 case 0x14: /* I/D-cache flush user */
1280 break;
1281 case 0x17: /* Block copy, sta access */
1283 // val = src
1284 // addr = dst
1285 // copy 32 bytes
1286 unsigned int i;
1287 uint32_t src = val & ~3, dst = addr & ~3, temp;
1289 for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
1290 temp = ldl_kernel(src);
1291 stl_kernel(dst, temp);
1294 break;
1295 case 0x1f: /* Block fill, stda access */
1297 // addr = dst
1298 // fill 32 bytes with val
1299 unsigned int i;
1300 uint32_t dst = addr & ~7;
1302 for (i = 0; i < 32; i += 8, dst += 8)
1303 stq_kernel(dst, val);
1305 break;
1306 case 0x20: /* MMU passthrough */
1308 switch(size) {
1309 case 1:
1310 stb_phys(addr, val);
1311 break;
1312 case 2:
1313 stw_phys(addr, val);
1314 break;
1315 case 4:
1316 default:
1317 stl_phys(addr, val);
1318 break;
1319 case 8:
1320 stq_phys(addr, val);
1321 break;
1324 break;
1325 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1327 switch(size) {
1328 case 1:
1329 stb_phys((target_phys_addr_t)addr
1330 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1331 break;
1332 case 2:
1333 stw_phys((target_phys_addr_t)addr
1334 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1335 break;
1336 case 4:
1337 default:
1338 stl_phys((target_phys_addr_t)addr
1339 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1340 break;
1341 case 8:
1342 stq_phys((target_phys_addr_t)addr
1343 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1344 break;
1347 break;
1348 case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
1349 case 0x31: // store buffer data, Ross RT620 I-cache flush or
1350 // Turbosparc snoop RAM
1351 case 0x32: // store buffer control or Turbosparc page table
1352 // descriptor diagnostic
1353 case 0x36: /* I-cache flash clear */
1354 case 0x37: /* D-cache flash clear */
1355 case 0x38: /* breakpoint diagnostics */
1356 case 0x4c: /* breakpoint action */
1357 break;
1358 case 8: /* User code access, XXX */
1359 case 9: /* Supervisor code access, XXX */
1360 default:
1361 do_unassigned_access(addr, 1, 0, asi);
1362 break;
1364 #ifdef DEBUG_ASI
1365 dump_asi("write", addr, asi, size, val);
1366 #endif
1369 #endif /* CONFIG_USER_ONLY */
1370 #else /* TARGET_SPARC64 */
1372 #ifdef CONFIG_USER_ONLY
1373 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1375 uint64_t ret = 0;
1376 #if defined(DEBUG_ASI)
1377 target_ulong last_addr = addr;
1378 #endif
1380 if (asi < 0x80)
1381 raise_exception(TT_PRIV_ACT);
1383 helper_check_align(addr, size - 1);
1384 ABI32_MASK(addr);
1386 switch (asi) {
1387 case 0x80: // Primary
1388 case 0x82: // Primary no-fault
1389 case 0x88: // Primary LE
1390 case 0x8a: // Primary no-fault LE
1392 switch(size) {
1393 case 1:
1394 ret = ldub_raw(addr);
1395 break;
1396 case 2:
1397 ret = lduw_raw(addr);
1398 break;
1399 case 4:
1400 ret = ldl_raw(addr);
1401 break;
1402 default:
1403 case 8:
1404 ret = ldq_raw(addr);
1405 break;
1408 break;
1409 case 0x81: // Secondary
1410 case 0x83: // Secondary no-fault
1411 case 0x89: // Secondary LE
1412 case 0x8b: // Secondary no-fault LE
1413 // XXX
1414 break;
1415 default:
1416 break;
1419 /* Convert from little endian */
1420 switch (asi) {
1421 case 0x88: // Primary LE
1422 case 0x89: // Secondary LE
1423 case 0x8a: // Primary no-fault LE
1424 case 0x8b: // Secondary no-fault LE
1425 switch(size) {
1426 case 2:
1427 ret = bswap16(ret);
1428 break;
1429 case 4:
1430 ret = bswap32(ret);
1431 break;
1432 case 8:
1433 ret = bswap64(ret);
1434 break;
1435 default:
1436 break;
1438 default:
1439 break;
1442 /* Convert to signed number */
1443 if (sign) {
1444 switch(size) {
1445 case 1:
1446 ret = (int8_t) ret;
1447 break;
1448 case 2:
1449 ret = (int16_t) ret;
1450 break;
1451 case 4:
1452 ret = (int32_t) ret;
1453 break;
1454 default:
1455 break;
1458 #ifdef DEBUG_ASI
1459 dump_asi("read ", last_addr, asi, size, ret);
1460 #endif
1461 return ret;
1464 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1466 #ifdef DEBUG_ASI
1467 dump_asi("write", addr, asi, size, val);
1468 #endif
1469 if (asi < 0x80)
1470 raise_exception(TT_PRIV_ACT);
1472 helper_check_align(addr, size - 1);
1473 ABI32_MASK(addr);
1475 /* Convert to little endian */
1476 switch (asi) {
1477 case 0x88: // Primary LE
1478 case 0x89: // Secondary LE
1479 switch(size) {
1480 case 2:
1481 val = bswap16(val);
1482 break;
1483 case 4:
1484 val = bswap32(val);
1485 break;
1486 case 8:
1487 val = bswap64(val);
1488 break;
1489 default:
1490 break;
1492 default:
1493 break;
1496 switch(asi) {
1497 case 0x80: // Primary
1498 case 0x88: // Primary LE
1500 switch(size) {
1501 case 1:
1502 stb_raw(addr, val);
1503 break;
1504 case 2:
1505 stw_raw(addr, val);
1506 break;
1507 case 4:
1508 stl_raw(addr, val);
1509 break;
1510 case 8:
1511 default:
1512 stq_raw(addr, val);
1513 break;
1516 break;
1517 case 0x81: // Secondary
1518 case 0x89: // Secondary LE
1519 // XXX
1520 return;
1522 case 0x82: // Primary no-fault, RO
1523 case 0x83: // Secondary no-fault, RO
1524 case 0x8a: // Primary no-fault LE, RO
1525 case 0x8b: // Secondary no-fault LE, RO
1526 default:
1527 do_unassigned_access(addr, 1, 0, 1);
1528 return;
1532 #else /* CONFIG_USER_ONLY */
1534 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1536 uint64_t ret = 0;
1537 #if defined(DEBUG_ASI)
1538 target_ulong last_addr = addr;
1539 #endif
1541 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1542 || (asi >= 0x30 && asi < 0x80 && !(env->hpstate & HS_PRIV)))
1543 raise_exception(TT_PRIV_ACT);
1545 helper_check_align(addr, size - 1);
1546 switch (asi) {
1547 case 0x10: // As if user primary
1548 case 0x18: // As if user primary LE
1549 case 0x80: // Primary
1550 case 0x82: // Primary no-fault
1551 case 0x88: // Primary LE
1552 case 0x8a: // Primary no-fault LE
1553 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1554 if (env->hpstate & HS_PRIV) {
1555 switch(size) {
1556 case 1:
1557 ret = ldub_hypv(addr);
1558 break;
1559 case 2:
1560 ret = lduw_hypv(addr);
1561 break;
1562 case 4:
1563 ret = ldl_hypv(addr);
1564 break;
1565 default:
1566 case 8:
1567 ret = ldq_hypv(addr);
1568 break;
1570 } else {
1571 switch(size) {
1572 case 1:
1573 ret = ldub_kernel(addr);
1574 break;
1575 case 2:
1576 ret = lduw_kernel(addr);
1577 break;
1578 case 4:
1579 ret = ldl_kernel(addr);
1580 break;
1581 default:
1582 case 8:
1583 ret = ldq_kernel(addr);
1584 break;
1587 } else {
1588 switch(size) {
1589 case 1:
1590 ret = ldub_user(addr);
1591 break;
1592 case 2:
1593 ret = lduw_user(addr);
1594 break;
1595 case 4:
1596 ret = ldl_user(addr);
1597 break;
1598 default:
1599 case 8:
1600 ret = ldq_user(addr);
1601 break;
1604 break;
1605 case 0x14: // Bypass
1606 case 0x15: // Bypass, non-cacheable
1607 case 0x1c: // Bypass LE
1608 case 0x1d: // Bypass, non-cacheable LE
1610 switch(size) {
1611 case 1:
1612 ret = ldub_phys(addr);
1613 break;
1614 case 2:
1615 ret = lduw_phys(addr);
1616 break;
1617 case 4:
1618 ret = ldl_phys(addr);
1619 break;
1620 default:
1621 case 8:
1622 ret = ldq_phys(addr);
1623 break;
1625 break;
1627 case 0x04: // Nucleus
1628 case 0x0c: // Nucleus Little Endian (LE)
1629 case 0x11: // As if user secondary
1630 case 0x19: // As if user secondary LE
1631 case 0x24: // Nucleus quad LDD 128 bit atomic
1632 case 0x2c: // Nucleus quad LDD 128 bit atomic
1633 case 0x4a: // UPA config
1634 case 0x81: // Secondary
1635 case 0x83: // Secondary no-fault
1636 case 0x89: // Secondary LE
1637 case 0x8b: // Secondary no-fault LE
1638 // XXX
1639 break;
1640 case 0x45: // LSU
1641 ret = env->lsu;
1642 break;
1643 case 0x50: // I-MMU regs
1645 int reg = (addr >> 3) & 0xf;
1647 ret = env->immuregs[reg];
1648 break;
1650 case 0x51: // I-MMU 8k TSB pointer
1651 case 0x52: // I-MMU 64k TSB pointer
1652 case 0x55: // I-MMU data access
1653 // XXX
1654 break;
1655 case 0x56: // I-MMU tag read
1657 unsigned int i;
1659 for (i = 0; i < 64; i++) {
1660 // Valid, ctx match, vaddr match
1661 if ((env->itlb_tte[i] & 0x8000000000000000ULL) != 0 &&
1662 env->itlb_tag[i] == addr) {
1663 ret = env->itlb_tag[i];
1664 break;
1667 break;
1669 case 0x58: // D-MMU regs
1671 int reg = (addr >> 3) & 0xf;
1673 ret = env->dmmuregs[reg];
1674 break;
1676 case 0x5e: // D-MMU tag read
1678 unsigned int i;
1680 for (i = 0; i < 64; i++) {
1681 // Valid, ctx match, vaddr match
1682 if ((env->dtlb_tte[i] & 0x8000000000000000ULL) != 0 &&
1683 env->dtlb_tag[i] == addr) {
1684 ret = env->dtlb_tag[i];
1685 break;
1688 break;
1690 case 0x59: // D-MMU 8k TSB pointer
1691 case 0x5a: // D-MMU 64k TSB pointer
1692 case 0x5b: // D-MMU data pointer
1693 case 0x5d: // D-MMU data access
1694 case 0x48: // Interrupt dispatch, RO
1695 case 0x49: // Interrupt data receive
1696 case 0x7f: // Incoming interrupt vector, RO
1697 // XXX
1698 break;
1699 case 0x54: // I-MMU data in, WO
1700 case 0x57: // I-MMU demap, WO
1701 case 0x5c: // D-MMU data in, WO
1702 case 0x5f: // D-MMU demap, WO
1703 case 0x77: // Interrupt vector, WO
1704 default:
1705 do_unassigned_access(addr, 0, 0, 1);
1706 ret = 0;
1707 break;
1710 /* Convert from little endian */
1711 switch (asi) {
1712 case 0x0c: // Nucleus Little Endian (LE)
1713 case 0x18: // As if user primary LE
1714 case 0x19: // As if user secondary LE
1715 case 0x1c: // Bypass LE
1716 case 0x1d: // Bypass, non-cacheable LE
1717 case 0x88: // Primary LE
1718 case 0x89: // Secondary LE
1719 case 0x8a: // Primary no-fault LE
1720 case 0x8b: // Secondary no-fault LE
1721 switch(size) {
1722 case 2:
1723 ret = bswap16(ret);
1724 break;
1725 case 4:
1726 ret = bswap32(ret);
1727 break;
1728 case 8:
1729 ret = bswap64(ret);
1730 break;
1731 default:
1732 break;
1734 default:
1735 break;
1738 /* Convert to signed number */
1739 if (sign) {
1740 switch(size) {
1741 case 1:
1742 ret = (int8_t) ret;
1743 break;
1744 case 2:
1745 ret = (int16_t) ret;
1746 break;
1747 case 4:
1748 ret = (int32_t) ret;
1749 break;
1750 default:
1751 break;
1754 #ifdef DEBUG_ASI
1755 dump_asi("read ", last_addr, asi, size, ret);
1756 #endif
1757 return ret;
1760 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1762 #ifdef DEBUG_ASI
1763 dump_asi("write", addr, asi, size, val);
1764 #endif
1765 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1766 || (asi >= 0x30 && asi < 0x80 && !(env->hpstate & HS_PRIV)))
1767 raise_exception(TT_PRIV_ACT);
1769 helper_check_align(addr, size - 1);
1770 /* Convert to little endian */
1771 switch (asi) {
1772 case 0x0c: // Nucleus Little Endian (LE)
1773 case 0x18: // As if user primary LE
1774 case 0x19: // As if user secondary LE
1775 case 0x1c: // Bypass LE
1776 case 0x1d: // Bypass, non-cacheable LE
1777 case 0x88: // Primary LE
1778 case 0x89: // Secondary LE
1779 switch(size) {
1780 case 2:
1781 val = bswap16(val);
1782 break;
1783 case 4:
1784 val = bswap32(val);
1785 break;
1786 case 8:
1787 val = bswap64(val);
1788 break;
1789 default:
1790 break;
1792 default:
1793 break;
1796 switch(asi) {
1797 case 0x10: // As if user primary
1798 case 0x18: // As if user primary LE
1799 case 0x80: // Primary
1800 case 0x88: // Primary LE
1801 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1802 if (env->hpstate & HS_PRIV) {
1803 switch(size) {
1804 case 1:
1805 stb_hypv(addr, val);
1806 break;
1807 case 2:
1808 stw_hypv(addr, val);
1809 break;
1810 case 4:
1811 stl_hypv(addr, val);
1812 break;
1813 case 8:
1814 default:
1815 stq_hypv(addr, val);
1816 break;
1818 } else {
1819 switch(size) {
1820 case 1:
1821 stb_kernel(addr, val);
1822 break;
1823 case 2:
1824 stw_kernel(addr, val);
1825 break;
1826 case 4:
1827 stl_kernel(addr, val);
1828 break;
1829 case 8:
1830 default:
1831 stq_kernel(addr, val);
1832 break;
1835 } else {
1836 switch(size) {
1837 case 1:
1838 stb_user(addr, val);
1839 break;
1840 case 2:
1841 stw_user(addr, val);
1842 break;
1843 case 4:
1844 stl_user(addr, val);
1845 break;
1846 case 8:
1847 default:
1848 stq_user(addr, val);
1849 break;
1852 break;
1853 case 0x14: // Bypass
1854 case 0x15: // Bypass, non-cacheable
1855 case 0x1c: // Bypass LE
1856 case 0x1d: // Bypass, non-cacheable LE
1858 switch(size) {
1859 case 1:
1860 stb_phys(addr, val);
1861 break;
1862 case 2:
1863 stw_phys(addr, val);
1864 break;
1865 case 4:
1866 stl_phys(addr, val);
1867 break;
1868 case 8:
1869 default:
1870 stq_phys(addr, val);
1871 break;
1874 return;
1875 case 0x04: // Nucleus
1876 case 0x0c: // Nucleus Little Endian (LE)
1877 case 0x11: // As if user secondary
1878 case 0x19: // As if user secondary LE
1879 case 0x24: // Nucleus quad LDD 128 bit atomic
1880 case 0x2c: // Nucleus quad LDD 128 bit atomic
1881 case 0x4a: // UPA config
1882 case 0x81: // Secondary
1883 case 0x89: // Secondary LE
1884 // XXX
1885 return;
1886 case 0x45: // LSU
1888 uint64_t oldreg;
1890 oldreg = env->lsu;
1891 env->lsu = val & (DMMU_E | IMMU_E);
1892 // Mappings generated during D/I MMU disabled mode are
1893 // invalid in normal mode
1894 if (oldreg != env->lsu) {
1895 DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
1896 oldreg, env->lsu);
1897 #ifdef DEBUG_MMU
1898 dump_mmu(env);
1899 #endif
1900 tlb_flush(env, 1);
1902 return;
1904 case 0x50: // I-MMU regs
1906 int reg = (addr >> 3) & 0xf;
1907 uint64_t oldreg;
1909 oldreg = env->immuregs[reg];
1910 switch(reg) {
1911 case 0: // RO
1912 case 4:
1913 return;
1914 case 1: // Not in I-MMU
1915 case 2:
1916 case 7:
1917 case 8:
1918 return;
1919 case 3: // SFSR
1920 if ((val & 1) == 0)
1921 val = 0; // Clear SFSR
1922 break;
1923 case 5: // TSB access
1924 case 6: // Tag access
1925 default:
1926 break;
1928 env->immuregs[reg] = val;
1929 if (oldreg != env->immuregs[reg]) {
1930 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
1931 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
1933 #ifdef DEBUG_MMU
1934 dump_mmu(env);
1935 #endif
1936 return;
1938 case 0x54: // I-MMU data in
1940 unsigned int i;
1942 // Try finding an invalid entry
1943 for (i = 0; i < 64; i++) {
1944 if ((env->itlb_tte[i] & 0x8000000000000000ULL) == 0) {
1945 env->itlb_tag[i] = env->immuregs[6];
1946 env->itlb_tte[i] = val;
1947 return;
1950 // Try finding an unlocked entry
1951 for (i = 0; i < 64; i++) {
1952 if ((env->itlb_tte[i] & 0x40) == 0) {
1953 env->itlb_tag[i] = env->immuregs[6];
1954 env->itlb_tte[i] = val;
1955 return;
1958 // error state?
1959 return;
1961 case 0x55: // I-MMU data access
1963 unsigned int i = (addr >> 3) & 0x3f;
1965 env->itlb_tag[i] = env->immuregs[6];
1966 env->itlb_tte[i] = val;
1967 return;
1969 case 0x57: // I-MMU demap
1970 // XXX
1971 return;
1972 case 0x58: // D-MMU regs
1974 int reg = (addr >> 3) & 0xf;
1975 uint64_t oldreg;
1977 oldreg = env->dmmuregs[reg];
1978 switch(reg) {
1979 case 0: // RO
1980 case 4:
1981 return;
1982 case 3: // SFSR
1983 if ((val & 1) == 0) {
1984 val = 0; // Clear SFSR, Fault address
1985 env->dmmuregs[4] = 0;
1987 env->dmmuregs[reg] = val;
1988 break;
1989 case 1: // Primary context
1990 case 2: // Secondary context
1991 case 5: // TSB access
1992 case 6: // Tag access
1993 case 7: // Virtual Watchpoint
1994 case 8: // Physical Watchpoint
1995 default:
1996 break;
1998 env->dmmuregs[reg] = val;
1999 if (oldreg != env->dmmuregs[reg]) {
2000 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
2001 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
2003 #ifdef DEBUG_MMU
2004 dump_mmu(env);
2005 #endif
2006 return;
2008 case 0x5c: // D-MMU data in
2010 unsigned int i;
2012 // Try finding an invalid entry
2013 for (i = 0; i < 64; i++) {
2014 if ((env->dtlb_tte[i] & 0x8000000000000000ULL) == 0) {
2015 env->dtlb_tag[i] = env->dmmuregs[6];
2016 env->dtlb_tte[i] = val;
2017 return;
2020 // Try finding an unlocked entry
2021 for (i = 0; i < 64; i++) {
2022 if ((env->dtlb_tte[i] & 0x40) == 0) {
2023 env->dtlb_tag[i] = env->dmmuregs[6];
2024 env->dtlb_tte[i] = val;
2025 return;
2028 // error state?
2029 return;
2031 case 0x5d: // D-MMU data access
2033 unsigned int i = (addr >> 3) & 0x3f;
2035 env->dtlb_tag[i] = env->dmmuregs[6];
2036 env->dtlb_tte[i] = val;
2037 return;
2039 case 0x5f: // D-MMU demap
2040 case 0x49: // Interrupt data receive
2041 // XXX
2042 return;
2043 case 0x51: // I-MMU 8k TSB pointer, RO
2044 case 0x52: // I-MMU 64k TSB pointer, RO
2045 case 0x56: // I-MMU tag read, RO
2046 case 0x59: // D-MMU 8k TSB pointer, RO
2047 case 0x5a: // D-MMU 64k TSB pointer, RO
2048 case 0x5b: // D-MMU data pointer, RO
2049 case 0x5e: // D-MMU tag read, RO
2050 case 0x48: // Interrupt dispatch, RO
2051 case 0x7f: // Incoming interrupt vector, RO
2052 case 0x82: // Primary no-fault, RO
2053 case 0x83: // Secondary no-fault, RO
2054 case 0x8a: // Primary no-fault LE, RO
2055 case 0x8b: // Secondary no-fault LE, RO
2056 default:
2057 do_unassigned_access(addr, 1, 0, 1);
2058 return;
2061 #endif /* CONFIG_USER_ONLY */
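/* Floating-point loads with an explicit ASI.  The block transfer ASIs
   (0xf0/0xf1/0xf8/0xf9) move 64 bytes into 16 consecutive single-precision
   registers and require a 64-byte aligned address and a register number
   that is a multiple of 8; other ASIs are forwarded to helper_ld_asi. */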
2063 void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
2065 unsigned int i;
2066 target_ulong val;
2068 helper_check_align(addr, 3);
2069 switch (asi) {
2070 case 0xf0: // Block load primary
2071 case 0xf1: // Block load secondary
2072 case 0xf8: // Block load primary LE
2073 case 0xf9: // Block load secondary LE
2074 if (rd & 7) {
2075 raise_exception(TT_ILL_INSN);
2076 return;
2078 helper_check_align(addr, 0x3f);
2079 for (i = 0; i < 16; i++) {
2080 *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4, 0);
2082 addr += 4;
2085 return;
2086 default:
2087 break;
2090 val = helper_ld_asi(addr, asi, size, 0);
2091 switch(size) {
2092 default:
2093 case 4:
2094 *((uint32_t *)&FT0) = val;
2095 break;
2096 case 8:
2097 *((int64_t *)&DT0) = val;
2098 break;
2099 case 16:
2100 // XXX
2101 break;
2105 void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
2107 unsigned int i;
2108 target_ulong val = 0;
2110 helper_check_align(addr, 3);
2111 switch (asi) {
2112 case 0xf0: // Block store primary
2113 case 0xf1: // Block store secondary
2114 case 0xf8: // Block store primary LE
2115 case 0xf9: // Block store secondary LE
2116 if (rd & 7) {
2117 raise_exception(TT_ILL_INSN);
2118 return;
2120 helper_check_align(addr, 0x3f);
2121 for (i = 0; i < 16; i++) {
2122 val = *(uint32_t *)&env->fpr[rd++];
2123 helper_st_asi(addr, val, asi & 0x8f, 4);
2124 addr += 4;
2127 return;
2128 default:
2129 break;
2132 switch(size) {
2133 default:
2134 case 4:
2135 val = *((uint32_t *)&FT0);
2136 break;
2137 case 8:
2138 val = *((int64_t *)&DT0);
2139 break;
2140 case 16:
2141 // XXX
2142 break;
2144 helper_st_asi(addr, val, asi, size);
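/* Compare-and-swap (casa/casxa): read the current memory value through the
   given ASI and, if it equals val1, write val2 back; the old memory value
   is returned either way.  The load and store are issued as two separate
   accesses. */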
2147 target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
2148 target_ulong val2, uint32_t asi)
2150 target_ulong ret;
2152 val1 &= 0xffffffffUL;
2153 ret = helper_ld_asi(addr, asi, 4, 0);
2154 ret &= 0xffffffffUL;
2155 if (val1 == ret)
2156 helper_st_asi(addr, val2 & 0xffffffffUL, asi, 4);
2157 return ret;
2160 target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
2161 target_ulong val2, uint32_t asi)
2163 target_ulong ret;
2165 ret = helper_ld_asi(addr, asi, 8, 0);
2166 if (val1 == ret)
2167 helper_st_asi(addr, val2, asi, 8);
2168 return ret;
2170 #endif /* TARGET_SPARC64 */
2172 #ifndef TARGET_SPARC64
2173 void helper_rett(void)
2175 unsigned int cwp;
2177 if (env->psret == 1)
2178 raise_exception(TT_ILL_INSN);
2180 env->psret = 1;
2181 cwp = (env->cwp + 1) & (NWINDOWS - 1);
2182 if (env->wim & (1 << cwp)) {
2183 raise_exception(TT_WIN_UNF);
2185 set_cwp(cwp);
2186 env->psrs = env->psrps;
2188 #endif
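/* udiv/sdiv helpers: the 64-bit dividend is formed from the Y register
   (upper 32 bits) and the first operand (lower 32 bits).  On overflow the
   result is clamped and cc_src2 is set so the cc variants can report the
   V flag; division by zero raises a trap. */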
2190 target_ulong helper_udiv(target_ulong a, target_ulong b)
2192 uint64_t x0;
2193 uint32_t x1;
2195 x0 = a | ((uint64_t) (env->y) << 32);
2196 x1 = b;
2198 if (x1 == 0) {
2199 raise_exception(TT_DIV_ZERO);
2202 x0 = x0 / x1;
2203 if (x0 > 0xffffffff) {
2204 env->cc_src2 = 1;
2205 return 0xffffffff;
2206 } else {
2207 env->cc_src2 = 0;
2208 return x0;
2212 target_ulong helper_sdiv(target_ulong a, target_ulong b)
2214 int64_t x0;
2215 int32_t x1;
2217 x0 = a | ((int64_t) (env->y) << 32);
2218 x1 = b;
2220 if (x1 == 0) {
2221 raise_exception(TT_DIV_ZERO);
2224 x0 = x0 / x1;
2225 if ((int32_t) x0 != x0) {
2226 env->cc_src2 = 1;
2227 return x0 < 0? 0x80000000: 0x7fffffff;
2228 } else {
2229 env->cc_src2 = 0;
2230 return x0;
2234 uint64_t helper_pack64(target_ulong high, target_ulong low)
2236 return ((uint64_t)high << 32) | (uint64_t)(low & 0xffffffff);
2239 void helper_stdf(target_ulong addr, int mem_idx)
2241 helper_check_align(addr, 7);
2242 #if !defined(CONFIG_USER_ONLY)
2243 switch (mem_idx) {
2244 case 0:
2245 stfq_user(addr, DT0);
2246 break;
2247 case 1:
2248 stfq_kernel(addr, DT0);
2249 break;
2250 #ifdef TARGET_SPARC64
2251 case 2:
2252 stfq_hypv(addr, DT0);
2253 break;
2254 #endif
2255 default:
2256 break;
2258 #else
2259 ABI32_MASK(addr);
2260 stfq_raw(addr, DT0);
2261 #endif
2264 void helper_lddf(target_ulong addr, int mem_idx)
2266 helper_check_align(addr, 7);
2267 #if !defined(CONFIG_USER_ONLY)
2268 switch (mem_idx) {
2269 case 0:
2270 DT0 = ldfq_user(addr);
2271 break;
2272 case 1:
2273 DT0 = ldfq_kernel(addr);
2274 break;
2275 #ifdef TARGET_SPARC64
2276 case 2:
2277 DT0 = ldfq_hypv(addr);
2278 break;
2279 #endif
2280 default:
2281 break;
2283 #else
2284 ABI32_MASK(addr);
2285 DT0 = ldfq_raw(addr);
2286 #endif
2289 void helper_ldqf(target_ulong addr, int mem_idx)
2291 // XXX add 128 bit load
2292 CPU_QuadU u;
2294 helper_check_align(addr, 7);
2295 #if !defined(CONFIG_USER_ONLY)
2296 switch (mem_idx) {
2297 case 0:
2298 u.ll.upper = ldq_user(addr);
2299 u.ll.lower = ldq_user(addr + 8);
2300 QT0 = u.q;
2301 break;
2302 case 1:
2303 u.ll.upper = ldq_kernel(addr);
2304 u.ll.lower = ldq_kernel(addr + 8);
2305 QT0 = u.q;
2306 break;
2307 #ifdef TARGET_SPARC64
2308 case 2:
2309 u.ll.upper = ldq_hypv(addr);
2310 u.ll.lower = ldq_hypv(addr + 8);
2311 QT0 = u.q;
2312 break;
2313 #endif
2314 default:
2315 break;
2317 #else
2318 ABI32_MASK(addr);
2319 u.ll.upper = ldq_raw(addr);
2320 u.ll.lower = ldq_raw((addr + 8) & 0xffffffffULL);
2321 QT0 = u.q;
2322 #endif
2325 void helper_stqf(target_ulong addr, int mem_idx)
2327 // XXX add 128 bit store
2328 CPU_QuadU u;
2330 helper_check_align(addr, 7);
2331 #if !defined(CONFIG_USER_ONLY)
2332 switch (mem_idx) {
2333 case 0:
2334 u.q = QT0;
2335 stq_user(addr, u.ll.upper);
2336 stq_user(addr + 8, u.ll.lower);
2337 break;
2338 case 1:
2339 u.q = QT0;
2340 stq_kernel(addr, u.ll.upper);
2341 stq_kernel(addr + 8, u.ll.lower);
2342 break;
2343 #ifdef TARGET_SPARC64
2344 case 2:
2345 u.q = QT0;
2346 stq_hypv(addr, u.ll.upper);
2347 stq_hypv(addr + 8, u.ll.lower);
2348 break;
2349 #endif
2350 default:
2351 break;
2353 #else
2354 u.q = QT0;
2355 ABI32_MASK(addr);
2356 stq_raw(addr, u.ll.upper);
2357 stq_raw((addr + 8) & 0xffffffffULL, u.ll.lower);
2358 #endif
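/* Loading the FSR must also propagate its rounding-direction (RD) field
   into the softfloat status, since the fp helpers round through
   env->fp_status rather than re-reading the FSR. */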
2361 void helper_ldfsr(void)
2363 int rnd_mode;
2365 PUT_FSR32(env, *((uint32_t *) &FT0));
2366 switch (env->fsr & FSR_RD_MASK) {
2367 case FSR_RD_NEAREST:
2368 rnd_mode = float_round_nearest_even;
2369 break;
2370 default:
2371 case FSR_RD_ZERO:
2372 rnd_mode = float_round_to_zero;
2373 break;
2374 case FSR_RD_POS:
2375 rnd_mode = float_round_up;
2376 break;
2377 case FSR_RD_NEG:
2378 rnd_mode = float_round_down;
2379 break;
2381 set_float_rounding_mode(rnd_mode, &env->fp_status);
2384 void helper_stfsr(void)
2386 *((uint32_t *) &FT0) = GET_FSR32(env);
2389 void helper_debug(void)
2391 env->exception_index = EXCP_DEBUG;
2392 cpu_loop_exit();
2395 #ifndef TARGET_SPARC64
2396 /* XXX: use another pointer for %iN registers to avoid slow wrapping
2397 handling ? */
2398 void helper_save(void)
2400 uint32_t cwp;
2402 cwp = (env->cwp - 1) & (NWINDOWS - 1);
2403 if (env->wim & (1 << cwp)) {
2404 raise_exception(TT_WIN_OVF);
2406 set_cwp(cwp);
2409 void helper_restore(void)
2411 uint32_t cwp;
2413 cwp = (env->cwp + 1) & (NWINDOWS - 1);
2414 if (env->wim & (1 << cwp)) {
2415 raise_exception(TT_WIN_UNF);
2417 set_cwp(cwp);
2420 void helper_wrpsr(target_ulong new_psr)
2422 if ((new_psr & PSR_CWP) >= NWINDOWS)
2423 raise_exception(TT_ILL_INSN);
2424 else
2425 PUT_PSR(env, new_psr);
2428 target_ulong helper_rdpsr(void)
2430 return GET_PSR(env);
2433 #else
2434 /* XXX: use another pointer for %iN registers to avoid slow wrapping
2435 handling ? */
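/* SPARC V9 window management: save/restore advance the current window
   pointer and maintain the CANSAVE/CANRESTORE/OTHERWIN/CLEANWIN counters,
   raising spill, fill or clean_window traps when no usable window is left.
   The trap type encodes WSTATE so the OS can select the matching
   spill/fill handler. */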
2436 void helper_save(void)
2438 uint32_t cwp;
2440 cwp = (env->cwp - 1) & (NWINDOWS - 1);
2441 if (env->cansave == 0) {
2442 raise_exception(TT_SPILL | (env->otherwin != 0 ?
2443 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2444 ((env->wstate & 0x7) << 2)));
2445 } else {
2446 if (env->cleanwin - env->canrestore == 0) {
2447 // XXX Clean windows without trap
2448 raise_exception(TT_CLRWIN);
2449 } else {
2450 env->cansave--;
2451 env->canrestore++;
2452 set_cwp(cwp);
2457 void helper_restore(void)
2459 uint32_t cwp;
2461 cwp = (env->cwp + 1) & (NWINDOWS - 1);
2462 if (env->canrestore == 0) {
2463 raise_exception(TT_FILL | (env->otherwin != 0 ?
2464 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2465 ((env->wstate & 0x7) << 2)));
2466 } else {
2467 env->cansave++;
2468 env->canrestore--;
2469 set_cwp(cwp);
2473 void helper_flushw(void)
2475 if (env->cansave != NWINDOWS - 2) {
2476 raise_exception(TT_SPILL | (env->otherwin != 0 ?
2477 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2478 ((env->wstate & 0x7) << 2)));
2482 void helper_saved(void)
2484 env->cansave++;
2485 if (env->otherwin == 0)
2486 env->canrestore--;
2487 else
2488 env->otherwin--;
2491 void helper_restored(void)
2493 env->canrestore++;
2494 if (env->cleanwin < NWINDOWS - 1)
2495 env->cleanwin++;
2496 if (env->otherwin == 0)
2497 env->cansave--;
2498 else
2499 env->otherwin--;
2502 target_ulong helper_rdccr(void)
2504 return GET_CCR(env);
2507 void helper_wrccr(target_ulong new_ccr)
2509 PUT_CCR(env, new_ccr);
2512 // CWP handling is reversed in V9, but we still use the V8 register
2513 // order.
2514 target_ulong helper_rdcwp(void)
2516 return GET_CWP64(env);
2519 void helper_wrcwp(target_ulong new_cwp)
2521 PUT_CWP64(env, new_cwp);
2524 // This function uses non-native bit order
2525 #define GET_FIELD(X, FROM, TO) \
2526 ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))
2528 // This function uses the order in the manuals, i.e. bit 0 is 2^0
2529 #define GET_FIELD_SP(X, FROM, TO) \
2530 GET_FIELD(X, 63 - (TO), 63 - (FROM))
2532 target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
2534 return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
2535 (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
2536 (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
2537 (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
2538 (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
2539 (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
2540 (((pixel_addr >> 55) & 1) << 4) |
2541 (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
2542 GET_FIELD_SP(pixel_addr, 11, 12);
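/* alignaddress: compute addr + offset, record the low three bits in
   GSR.align for a later faligndata, and return the sum rounded down to an
   8-byte boundary. */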
2545 target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
2547 uint64_t tmp;
2549 tmp = addr + offset;
2550 env->gsr &= ~7ULL;
2551 env->gsr |= tmp & 7ULL;
2552 return tmp & ~7ULL;
2555 target_ulong helper_popc(target_ulong val)
2557 return ctpop64(val);
2560 static inline uint64_t *get_gregset(uint64_t pstate)
2562 switch (pstate) {
2563 default:
2564 case 0:
2565 return env->bgregs;
2566 case PS_AG:
2567 return env->agregs;
2568 case PS_MG:
2569 return env->mgregs;
2570 case PS_IG:
2571 return env->igregs;
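/* Writes to PSTATE may switch between the normal, alternate (AG), MMU (MG)
   and interrupt (IG) global register sets.  Only one set is kept live in
   env->gregs; change_pstate swaps the live registers with the saved copy
   of the newly selected bank. */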
2575 void change_pstate(uint64_t new_pstate)
2577 uint64_t pstate_regs, new_pstate_regs;
2578 uint64_t *src, *dst;
2580 pstate_regs = env->pstate & 0xc01;
2581 new_pstate_regs = new_pstate & 0xc01;
2582 if (new_pstate_regs != pstate_regs) {
2583 // Switch global register bank
2584 src = get_gregset(new_pstate_regs);
2585 dst = get_gregset(pstate_regs);
2586 memcpy32(dst, env->gregs);
2587 memcpy32(env->gregs, src);
2589 env->pstate = new_pstate;
2592 void helper_wrpstate(target_ulong new_state)
2594 change_pstate(new_state & 0xf3f);
2597 void helper_done(void)
2599 env->tl--;
2600 env->tsptr = &env->ts[env->tl];
2601 env->pc = env->tsptr->tpc;
2602 env->npc = env->tsptr->tnpc + 4;
2603 PUT_CCR(env, env->tsptr->tstate >> 32);
2604 env->asi = (env->tsptr->tstate >> 24) & 0xff;
2605 change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
2606 PUT_CWP64(env, env->tsptr->tstate & 0xff);
2609 void helper_retry(void)
2611 env->tl--;
2612 env->tsptr = &env->ts[env->tl];
2613 env->pc = env->tsptr->tpc;
2614 env->npc = env->tsptr->tnpc;
2615 PUT_CCR(env, env->tsptr->tstate >> 32);
2616 env->asi = (env->tsptr->tstate >> 24) & 0xff;
2617 change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
2618 PUT_CWP64(env, env->tsptr->tstate & 0xff);
2620 #endif
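/* Register window switching: the in registers of one window overlap the
   outs of the next, and the last window wraps around to the first.  When
   entering or leaving window NWINDOWS-1 the wrap registers are copied
   between their canonical place at the start of regbase and a spare slot
   past the end, so regwptr-relative accesses stay simple. */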
2622 void cpu_set_cwp(CPUState *env1, int new_cwp)
2624 /* put the modified wrap registers at their proper location */
2625 if (env1->cwp == (NWINDOWS - 1))
2626 memcpy32(env1->regbase, env1->regbase + NWINDOWS * 16);
2627 env1->cwp = new_cwp;
2628 /* put the wrap registers at their temporary location */
2629 if (new_cwp == (NWINDOWS - 1))
2630 memcpy32(env1->regbase + NWINDOWS * 16, env1->regbase);
2631 env1->regwptr = env1->regbase + (new_cwp * 16);
2634 void set_cwp(int new_cwp)
2636 cpu_set_cwp(env, new_cwp);
2639 void helper_flush(target_ulong addr)
2641 addr &= ~7;
2642 tb_invalidate_page_range(addr, addr + 8);
2645 #if !defined(CONFIG_USER_ONLY)
2647 static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
2648 void *retaddr);
2650 #define MMUSUFFIX _mmu
2651 #define ALIGNED_ONLY
2653 #define SHIFT 0
2654 #include "softmmu_template.h"
2656 #define SHIFT 1
2657 #include "softmmu_template.h"
2659 #define SHIFT 2
2660 #include "softmmu_template.h"
2662 #define SHIFT 3
2663 #include "softmmu_template.h"
2665 /* XXX: make it generic ? */
2666 static void cpu_restore_state2(void *retaddr)
2668 TranslationBlock *tb;
2669 unsigned long pc;
2671 if (retaddr) {
2672 /* now we have a real cpu fault */
2673 pc = (unsigned long)retaddr;
2674 tb = tb_find_pc(pc);
2675 if (tb) {
2676 /* the PC is inside the translated code. It means that we have
2677 a virtual CPU fault */
2678 cpu_restore_state(tb, env, pc, (void *)(long)env->cond);
2683 static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
2684 void *retaddr)
2686 #ifdef DEBUG_UNALIGNED
2687 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
2688 "\n", addr, env->pc);
2689 #endif
2690 cpu_restore_state2(retaddr);
2691 raise_exception(TT_UNALIGNED);
2694 /* try to fill the TLB and return an exception if error. If retaddr is
2695 NULL, it means that the function was called in C code (i.e. not
2696 from generated code or from helper.c) */
2697 /* XXX: fix it to restore all registers */
2698 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2700 int ret;
2701 CPUState *saved_env;
2703 /* XXX: hack to restore env in all cases, even if not called from
2704 generated code */
2705 saved_env = env;
2706 env = cpu_single_env;
2708 ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
2709 if (ret) {
2710 cpu_restore_state2(retaddr);
2711 cpu_loop_exit();
2713 env = saved_env;
2716 #endif
2718 #ifndef TARGET_SPARC64
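/* Accesses that hit neither RAM nor a device: on sparc32 the MMU
   synchronous fault status and fault address registers are updated
   (flagging overflow if an earlier fault was never read) and a code or
   data access error trap is raised unless the MMU is disabled or running
   in no-fault mode. */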
2719 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
2720 int is_asi)
2722 CPUState *saved_env;
2724 /* XXX: hack to restore env in all cases, even if not called from
2725 generated code */
2726 saved_env = env;
2727 env = cpu_single_env;
2728 #ifdef DEBUG_UNASSIGNED
2729 if (is_asi)
2730 printf("Unassigned mem %s access to " TARGET_FMT_plx
2731 " asi 0x%02x from " TARGET_FMT_lx "\n",
2732 is_exec ? "exec" : is_write ? "write" : "read", addr, is_asi,
2733 env->pc);
2734 else
2735 printf("Unassigned mem %s access to " TARGET_FMT_plx " from "
2736 TARGET_FMT_lx "\n",
2737 is_exec ? "exec" : is_write ? "write" : "read", addr, env->pc);
2738 #endif
2739 if (env->mmuregs[3]) /* Fault status register */
2740 env->mmuregs[3] = 1; /* overflow (not read before another fault) */
2741 if (is_asi)
2742 env->mmuregs[3] |= 1 << 16;
2743 if (env->psrs)
2744 env->mmuregs[3] |= 1 << 5;
2745 if (is_exec)
2746 env->mmuregs[3] |= 1 << 6;
2747 if (is_write)
2748 env->mmuregs[3] |= 1 << 7;
2749 env->mmuregs[3] |= (5 << 2) | 2;
2750 env->mmuregs[4] = addr; /* Fault address register */
2751 if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
2752 if (is_exec)
2753 raise_exception(TT_CODE_ACCESS);
2754 else
2755 raise_exception(TT_DATA_ACCESS);
2757 env = saved_env;
2759 #else
2760 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
2761 int is_asi)
2763 #ifdef DEBUG_UNASSIGNED
2764 CPUState *saved_env;
2766 /* XXX: hack to restore env in all cases, even if not called from
2767 generated code */
2768 saved_env = env;
2769 env = cpu_single_env;
2770 printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
2771 "\n", addr, env->pc);
2772 env = saved_env;
2773 #endif
2774 if (is_exec)
2775 raise_exception(TT_CODE_ACCESS);
2776 else
2777 raise_exception(TT_DATA_ACCESS);
2779 #endif