sparc64: handle asi referencing nucleus and secondary MMU contexts
[qemu/aliguori-queue.git] / target-sparc / op_helper.c
blobb6b08d3ae0d9555d0a8c88f1a3db99e3cb6f8022
1 #include "exec.h"
2 #include "host-utils.h"
3 #include "helper.h"
/* Compile-time debug switches: uncomment to enable the matching DPRINTF_*. */
//#define DEBUG_MMU
//#define DEBUG_MXCC
//#define DEBUG_UNALIGNED
//#define DEBUG_UNASSIGNED
//#define DEBUG_ASI
//#define DEBUG_PCALL
//#define DEBUG_PSTATE

#ifdef DEBUG_MMU
#define DPRINTF_MMU(fmt, ...) \
    do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_MMU(fmt, ...) do {} while (0)
#endif

#ifdef DEBUG_MXCC
#define DPRINTF_MXCC(fmt, ...) \
    do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_MXCC(fmt, ...) do {} while (0)
#endif

/* NOTE(review): no no-op fallback here — every DPRINTF_ASI caller is
   itself compiled only when DEBUG_ASI is defined. */
#ifdef DEBUG_ASI
#define DPRINTF_ASI(fmt, ...) \
    do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
#endif

#ifdef DEBUG_PSTATE
#define DPRINTF_PSTATE(fmt, ...) \
    do { printf("PSTATE: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_PSTATE(fmt, ...) do {} while (0)
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
/* 32-bit address masking is active when PSTATE.AM is set */
#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
#else
/* 32-bit ABI: addresses are always masked */
#define AM_CHECK(env1) (1)
#endif
#endif

#if defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
                                 int is_asi, int size);
#endif
52 #if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
/* Calculates the TSB pointer value for a fault page size of 8k or 64k:
   the VA field of the tag-access register is folded into the TSB base
   according to the TSB size/split configuration bits. */
static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
                                       uint64_t tag_access_register,
                                       int page_size)
{
    uint64_t tsb_base = tsb_register & ~0x1fffULL;
    int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
    int tsb_size = tsb_register & 0xf;

    /* discard lower 13 bits which hold tag access context */
    uint64_t tag_access_va = tag_access_register & ~0x1fffULL;

    /* now reorder bits */
    uint64_t tsb_base_mask = ~0x1fffULL;
    uint64_t va = tag_access_va;

    /* move va bits to correct position */
    if (page_size == 8 * 1024) {
        va >>= 9;
    } else if (page_size == 64 * 1024) {
        va >>= 12;
    }

    if (tsb_size) {
        tsb_base_mask <<= tsb_size;
    }

    /* calculate tsb_base mask and adjust va if split is in use */
    if (tsb_split) {
        if (page_size == 8 * 1024) {
            va &= ~(1ULL << (13 + tsb_size));
        } else if (page_size == 64 * 1024) {
            va |= (1ULL << (13 + tsb_size));
        }
        tsb_base_mask <<= 1;
    }

    /* low 4 bits are always cleared in the resulting pointer */
    return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
}
/* Calculates the tag-target register value by reordering the bits of
   the tag-access register: context (low 13 bits) moves to bits 48..60,
   VA field (bits 22 and up) moves to the low bits. */
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
{
    uint64_t context = tag_access_register & 0x1fff;
    uint64_t va_field = tag_access_register >> 22;

    return (context << 48) | va_field;
}
100 static void replace_tlb_entry(SparcTLBEntry *tlb,
101 uint64_t tlb_tag, uint64_t tlb_tte,
102 CPUState *env1)
104 target_ulong mask, size, va, offset;
106 // flush page range if translation is valid
107 if (TTE_IS_VALID(tlb->tte)) {
109 mask = 0xffffffffffffe000ULL;
110 mask <<= 3 * ((tlb->tte >> 61) & 3);
111 size = ~mask + 1;
113 va = tlb->tag & mask;
115 for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
116 tlb_flush_page(env1, va + offset);
120 tlb->tag = tlb_tag;
121 tlb->tte = tlb_tte;
124 static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
125 const char* strmmu, CPUState *env1)
127 unsigned int i;
128 target_ulong mask;
129 uint64_t context;
131 int is_demap_context = (demap_addr >> 6) & 1;
133 // demap context
134 switch ((demap_addr >> 4) & 3) {
135 case 0: // primary
136 context = env1->dmmu.mmu_primary_context;
137 break;
138 case 1: // secondary
139 context = env1->dmmu.mmu_secondary_context;
140 break;
141 case 2: // nucleus
142 context = 0;
143 break;
144 case 3: // reserved
145 default:
146 return;
149 for (i = 0; i < 64; i++) {
150 if (TTE_IS_VALID(tlb[i].tte)) {
152 if (is_demap_context) {
153 // will remove non-global entries matching context value
154 if (TTE_IS_GLOBAL(tlb[i].tte) ||
155 !tlb_compare_context(&tlb[i], context)) {
156 continue;
158 } else {
159 // demap page
160 // will remove any entry matching VA
161 mask = 0xffffffffffffe000ULL;
162 mask <<= 3 * ((tlb[i].tte >> 61) & 3);
164 if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
165 continue;
168 // entry should be global or matching context value
169 if (!TTE_IS_GLOBAL(tlb[i].tte) &&
170 !tlb_compare_context(&tlb[i], context)) {
171 continue;
175 replace_tlb_entry(&tlb[i], 0, 0, env1);
176 #ifdef DEBUG_MMU
177 DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
178 dump_mmu(env1);
179 #endif
184 static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
185 uint64_t tlb_tag, uint64_t tlb_tte,
186 const char* strmmu, CPUState *env1)
188 unsigned int i, replace_used;
190 // Try replacing invalid entry
191 for (i = 0; i < 64; i++) {
192 if (!TTE_IS_VALID(tlb[i].tte)) {
193 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
194 #ifdef DEBUG_MMU
195 DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
196 dump_mmu(env1);
197 #endif
198 return;
202 // All entries are valid, try replacing unlocked entry
204 for (replace_used = 0; replace_used < 2; ++replace_used) {
206 // Used entries are not replaced on first pass
208 for (i = 0; i < 64; i++) {
209 if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {
211 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
212 #ifdef DEBUG_MMU
213 DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
214 strmmu, (replace_used?"used":"unused"), i);
215 dump_mmu(env1);
216 #endif
217 return;
221 // Now reset used bit and search for unused entries again
223 for (i = 0; i < 64; i++) {
224 TTE_SET_UNUSED(tlb[i].tte);
228 #ifdef DEBUG_MMU
229 DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
230 #endif
231 // error state?
234 #endif
236 static inline target_ulong address_mask(CPUState *env1, target_ulong addr)
238 #ifdef TARGET_SPARC64
239 if (AM_CHECK(env1))
240 addr &= 0xffffffffULL;
241 #endif
242 return addr;
245 static void raise_exception(int tt)
247 env->exception_index = tt;
248 cpu_loop_exit();
251 void HELPER(raise_exception)(int tt)
253 raise_exception(tt);
256 static inline void set_cwp(int new_cwp)
258 cpu_set_cwp(env, new_cwp);
261 void helper_check_align(target_ulong addr, uint32_t align)
263 if (addr & align) {
264 #ifdef DEBUG_UNALIGNED
265 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
266 "\n", addr, env->pc);
267 #endif
268 raise_exception(TT_UNALIGNED);
272 #define F_HELPER(name, p) void helper_f##name##p(void)
274 #define F_BINOP(name) \
275 float32 helper_f ## name ## s (float32 src1, float32 src2) \
277 return float32_ ## name (src1, src2, &env->fp_status); \
279 F_HELPER(name, d) \
281 DT0 = float64_ ## name (DT0, DT1, &env->fp_status); \
283 F_HELPER(name, q) \
285 QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \
288 F_BINOP(add);
289 F_BINOP(sub);
290 F_BINOP(mul);
291 F_BINOP(div);
292 #undef F_BINOP
294 void helper_fsmuld(float32 src1, float32 src2)
296 DT0 = float64_mul(float32_to_float64(src1, &env->fp_status),
297 float32_to_float64(src2, &env->fp_status),
298 &env->fp_status);
301 void helper_fdmulq(void)
303 QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
304 float64_to_float128(DT1, &env->fp_status),
305 &env->fp_status);
308 float32 helper_fnegs(float32 src)
310 return float32_chs(src);
313 #ifdef TARGET_SPARC64
314 F_HELPER(neg, d)
316 DT0 = float64_chs(DT1);
319 F_HELPER(neg, q)
321 QT0 = float128_chs(QT1);
323 #endif
325 /* Integer to float conversion. */
326 float32 helper_fitos(int32_t src)
328 return int32_to_float32(src, &env->fp_status);
331 void helper_fitod(int32_t src)
333 DT0 = int32_to_float64(src, &env->fp_status);
336 void helper_fitoq(int32_t src)
338 QT0 = int32_to_float128(src, &env->fp_status);
341 #ifdef TARGET_SPARC64
342 float32 helper_fxtos(void)
344 return int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
347 F_HELPER(xto, d)
349 DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
352 F_HELPER(xto, q)
354 QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
356 #endif
357 #undef F_HELPER
359 /* floating point conversion */
360 float32 helper_fdtos(void)
362 return float64_to_float32(DT1, &env->fp_status);
365 void helper_fstod(float32 src)
367 DT0 = float32_to_float64(src, &env->fp_status);
370 float32 helper_fqtos(void)
372 return float128_to_float32(QT1, &env->fp_status);
375 void helper_fstoq(float32 src)
377 QT0 = float32_to_float128(src, &env->fp_status);
380 void helper_fqtod(void)
382 DT0 = float128_to_float64(QT1, &env->fp_status);
385 void helper_fdtoq(void)
387 QT0 = float64_to_float128(DT1, &env->fp_status);
390 /* Float to integer conversion. */
391 int32_t helper_fstoi(float32 src)
393 return float32_to_int32_round_to_zero(src, &env->fp_status);
396 int32_t helper_fdtoi(void)
398 return float64_to_int32_round_to_zero(DT1, &env->fp_status);
401 int32_t helper_fqtoi(void)
403 return float128_to_int32_round_to_zero(QT1, &env->fp_status);
406 #ifdef TARGET_SPARC64
407 void helper_fstox(float32 src)
409 *((int64_t *)&DT0) = float32_to_int64_round_to_zero(src, &env->fp_status);
412 void helper_fdtox(void)
414 *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
417 void helper_fqtox(void)
419 *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
422 void helper_faligndata(void)
424 uint64_t tmp;
426 tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
427 /* on many architectures a shift of 64 does nothing */
428 if ((env->gsr & 7) != 0) {
429 tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
431 *((uint64_t *)&DT0) = tmp;
434 #ifdef HOST_WORDS_BIGENDIAN
435 #define VIS_B64(n) b[7 - (n)]
436 #define VIS_W64(n) w[3 - (n)]
437 #define VIS_SW64(n) sw[3 - (n)]
438 #define VIS_L64(n) l[1 - (n)]
439 #define VIS_B32(n) b[3 - (n)]
440 #define VIS_W32(n) w[1 - (n)]
441 #else
442 #define VIS_B64(n) b[n]
443 #define VIS_W64(n) w[n]
444 #define VIS_SW64(n) sw[n]
445 #define VIS_L64(n) l[n]
446 #define VIS_B32(n) b[n]
447 #define VIS_W32(n) w[n]
448 #endif
450 typedef union {
451 uint8_t b[8];
452 uint16_t w[4];
453 int16_t sw[4];
454 uint32_t l[2];
455 float64 d;
456 } vis64;
458 typedef union {
459 uint8_t b[4];
460 uint16_t w[2];
461 uint32_t l;
462 float32 f;
463 } vis32;
465 void helper_fpmerge(void)
467 vis64 s, d;
469 s.d = DT0;
470 d.d = DT1;
472 // Reverse calculation order to handle overlap
473 d.VIS_B64(7) = s.VIS_B64(3);
474 d.VIS_B64(6) = d.VIS_B64(3);
475 d.VIS_B64(5) = s.VIS_B64(2);
476 d.VIS_B64(4) = d.VIS_B64(2);
477 d.VIS_B64(3) = s.VIS_B64(1);
478 d.VIS_B64(2) = d.VIS_B64(1);
479 d.VIS_B64(1) = s.VIS_B64(0);
480 //d.VIS_B64(0) = d.VIS_B64(0);
482 DT0 = d.d;
485 void helper_fmul8x16(void)
487 vis64 s, d;
488 uint32_t tmp;
490 s.d = DT0;
491 d.d = DT1;
493 #define PMUL(r) \
494 tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \
495 if ((tmp & 0xff) > 0x7f) \
496 tmp += 0x100; \
497 d.VIS_W64(r) = tmp >> 8;
499 PMUL(0);
500 PMUL(1);
501 PMUL(2);
502 PMUL(3);
503 #undef PMUL
505 DT0 = d.d;
508 void helper_fmul8x16al(void)
510 vis64 s, d;
511 uint32_t tmp;
513 s.d = DT0;
514 d.d = DT1;
516 #define PMUL(r) \
517 tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \
518 if ((tmp & 0xff) > 0x7f) \
519 tmp += 0x100; \
520 d.VIS_W64(r) = tmp >> 8;
522 PMUL(0);
523 PMUL(1);
524 PMUL(2);
525 PMUL(3);
526 #undef PMUL
528 DT0 = d.d;
531 void helper_fmul8x16au(void)
533 vis64 s, d;
534 uint32_t tmp;
536 s.d = DT0;
537 d.d = DT1;
539 #define PMUL(r) \
540 tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \
541 if ((tmp & 0xff) > 0x7f) \
542 tmp += 0x100; \
543 d.VIS_W64(r) = tmp >> 8;
545 PMUL(0);
546 PMUL(1);
547 PMUL(2);
548 PMUL(3);
549 #undef PMUL
551 DT0 = d.d;
554 void helper_fmul8sux16(void)
556 vis64 s, d;
557 uint32_t tmp;
559 s.d = DT0;
560 d.d = DT1;
562 #define PMUL(r) \
563 tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
564 if ((tmp & 0xff) > 0x7f) \
565 tmp += 0x100; \
566 d.VIS_W64(r) = tmp >> 8;
568 PMUL(0);
569 PMUL(1);
570 PMUL(2);
571 PMUL(3);
572 #undef PMUL
574 DT0 = d.d;
577 void helper_fmul8ulx16(void)
579 vis64 s, d;
580 uint32_t tmp;
582 s.d = DT0;
583 d.d = DT1;
585 #define PMUL(r) \
586 tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
587 if ((tmp & 0xff) > 0x7f) \
588 tmp += 0x100; \
589 d.VIS_W64(r) = tmp >> 8;
591 PMUL(0);
592 PMUL(1);
593 PMUL(2);
594 PMUL(3);
595 #undef PMUL
597 DT0 = d.d;
600 void helper_fmuld8sux16(void)
602 vis64 s, d;
603 uint32_t tmp;
605 s.d = DT0;
606 d.d = DT1;
608 #define PMUL(r) \
609 tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
610 if ((tmp & 0xff) > 0x7f) \
611 tmp += 0x100; \
612 d.VIS_L64(r) = tmp;
614 // Reverse calculation order to handle overlap
615 PMUL(1);
616 PMUL(0);
617 #undef PMUL
619 DT0 = d.d;
622 void helper_fmuld8ulx16(void)
624 vis64 s, d;
625 uint32_t tmp;
627 s.d = DT0;
628 d.d = DT1;
630 #define PMUL(r) \
631 tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
632 if ((tmp & 0xff) > 0x7f) \
633 tmp += 0x100; \
634 d.VIS_L64(r) = tmp;
636 // Reverse calculation order to handle overlap
637 PMUL(1);
638 PMUL(0);
639 #undef PMUL
641 DT0 = d.d;
644 void helper_fexpand(void)
646 vis32 s;
647 vis64 d;
649 s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
650 d.d = DT1;
651 d.VIS_W64(0) = s.VIS_B32(0) << 4;
652 d.VIS_W64(1) = s.VIS_B32(1) << 4;
653 d.VIS_W64(2) = s.VIS_B32(2) << 4;
654 d.VIS_W64(3) = s.VIS_B32(3) << 4;
656 DT0 = d.d;
659 #define VIS_HELPER(name, F) \
660 void name##16(void) \
662 vis64 s, d; \
664 s.d = DT0; \
665 d.d = DT1; \
667 d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
668 d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
669 d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
670 d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
672 DT0 = d.d; \
675 uint32_t name##16s(uint32_t src1, uint32_t src2) \
677 vis32 s, d; \
679 s.l = src1; \
680 d.l = src2; \
682 d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
683 d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
685 return d.l; \
688 void name##32(void) \
690 vis64 s, d; \
692 s.d = DT0; \
693 d.d = DT1; \
695 d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
696 d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
698 DT0 = d.d; \
701 uint32_t name##32s(uint32_t src1, uint32_t src2) \
703 vis32 s, d; \
705 s.l = src1; \
706 d.l = src2; \
708 d.l = F(d.l, s.l); \
710 return d.l; \
713 #define FADD(a, b) ((a) + (b))
714 #define FSUB(a, b) ((a) - (b))
715 VIS_HELPER(helper_fpadd, FADD)
716 VIS_HELPER(helper_fpsub, FSUB)
718 #define VIS_CMPHELPER(name, F) \
719 void name##16(void) \
721 vis64 s, d; \
723 s.d = DT0; \
724 d.d = DT1; \
726 d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0; \
727 d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0; \
728 d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0; \
729 d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0; \
731 DT0 = d.d; \
734 void name##32(void) \
736 vis64 s, d; \
738 s.d = DT0; \
739 d.d = DT1; \
741 d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0; \
742 d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0; \
744 DT0 = d.d; \
747 #define FCMPGT(a, b) ((a) > (b))
748 #define FCMPEQ(a, b) ((a) == (b))
749 #define FCMPLE(a, b) ((a) <= (b))
750 #define FCMPNE(a, b) ((a) != (b))
752 VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
753 VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
754 VIS_CMPHELPER(helper_fcmple, FCMPLE)
755 VIS_CMPHELPER(helper_fcmpne, FCMPNE)
756 #endif
758 void helper_check_ieee_exceptions(void)
760 target_ulong status;
762 status = get_float_exception_flags(&env->fp_status);
763 if (status) {
764 /* Copy IEEE 754 flags into FSR */
765 if (status & float_flag_invalid)
766 env->fsr |= FSR_NVC;
767 if (status & float_flag_overflow)
768 env->fsr |= FSR_OFC;
769 if (status & float_flag_underflow)
770 env->fsr |= FSR_UFC;
771 if (status & float_flag_divbyzero)
772 env->fsr |= FSR_DZC;
773 if (status & float_flag_inexact)
774 env->fsr |= FSR_NXC;
776 if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
777 /* Unmasked exception, generate a trap */
778 env->fsr |= FSR_FTT_IEEE_EXCP;
779 raise_exception(TT_FP_EXCP);
780 } else {
781 /* Accumulate exceptions */
782 env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
787 void helper_clear_float_exceptions(void)
789 set_float_exception_flags(0, &env->fp_status);
792 float32 helper_fabss(float32 src)
794 return float32_abs(src);
797 #ifdef TARGET_SPARC64
798 void helper_fabsd(void)
800 DT0 = float64_abs(DT1);
803 void helper_fabsq(void)
805 QT0 = float128_abs(QT1);
807 #endif
809 float32 helper_fsqrts(float32 src)
811 return float32_sqrt(src, &env->fp_status);
814 void helper_fsqrtd(void)
816 DT0 = float64_sqrt(DT1, &env->fp_status);
819 void helper_fsqrtq(void)
821 QT0 = float128_sqrt(QT1, &env->fp_status);
824 #define GEN_FCMP(name, size, reg1, reg2, FS, TRAP) \
825 void glue(helper_, name) (void) \
827 target_ulong new_fsr; \
829 env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
830 switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) { \
831 case float_relation_unordered: \
832 new_fsr = (FSR_FCC1 | FSR_FCC0) << FS; \
833 if ((env->fsr & FSR_NVM) || TRAP) { \
834 env->fsr |= new_fsr; \
835 env->fsr |= FSR_NVC; \
836 env->fsr |= FSR_FTT_IEEE_EXCP; \
837 raise_exception(TT_FP_EXCP); \
838 } else { \
839 env->fsr |= FSR_NVA; \
841 break; \
842 case float_relation_less: \
843 new_fsr = FSR_FCC0 << FS; \
844 break; \
845 case float_relation_greater: \
846 new_fsr = FSR_FCC1 << FS; \
847 break; \
848 default: \
849 new_fsr = 0; \
850 break; \
852 env->fsr |= new_fsr; \
854 #define GEN_FCMPS(name, size, FS, TRAP) \
855 void glue(helper_, name)(float32 src1, float32 src2) \
857 target_ulong new_fsr; \
859 env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
860 switch (glue(size, _compare) (src1, src2, &env->fp_status)) { \
861 case float_relation_unordered: \
862 new_fsr = (FSR_FCC1 | FSR_FCC0) << FS; \
863 if ((env->fsr & FSR_NVM) || TRAP) { \
864 env->fsr |= new_fsr; \
865 env->fsr |= FSR_NVC; \
866 env->fsr |= FSR_FTT_IEEE_EXCP; \
867 raise_exception(TT_FP_EXCP); \
868 } else { \
869 env->fsr |= FSR_NVA; \
871 break; \
872 case float_relation_less: \
873 new_fsr = FSR_FCC0 << FS; \
874 break; \
875 case float_relation_greater: \
876 new_fsr = FSR_FCC1 << FS; \
877 break; \
878 default: \
879 new_fsr = 0; \
880 break; \
882 env->fsr |= new_fsr; \
885 GEN_FCMPS(fcmps, float32, 0, 0);
886 GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);
888 GEN_FCMPS(fcmpes, float32, 0, 1);
889 GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);
891 GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
892 GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
894 static uint32_t compute_all_flags(void)
896 return env->psr & PSR_ICC;
899 static uint32_t compute_C_flags(void)
901 return env->psr & PSR_CARRY;
904 static inline uint32_t get_NZ_icc(target_ulong dst)
906 uint32_t ret = 0;
908 if (!(dst & 0xffffffffULL))
909 ret |= PSR_ZERO;
910 if ((int32_t) (dst & 0xffffffffULL) < 0)
911 ret |= PSR_NEG;
912 return ret;
915 #ifdef TARGET_SPARC64
916 static uint32_t compute_all_flags_xcc(void)
918 return env->xcc & PSR_ICC;
921 static uint32_t compute_C_flags_xcc(void)
923 return env->xcc & PSR_CARRY;
926 static inline uint32_t get_NZ_xcc(target_ulong dst)
928 uint32_t ret = 0;
930 if (!dst)
931 ret |= PSR_ZERO;
932 if ((int64_t)dst < 0)
933 ret |= PSR_NEG;
934 return ret;
936 #endif
938 static inline uint32_t get_V_div_icc(target_ulong src2)
940 uint32_t ret = 0;
942 if (src2 != 0)
943 ret |= PSR_OVF;
944 return ret;
947 static uint32_t compute_all_div(void)
949 uint32_t ret;
951 ret = get_NZ_icc(CC_DST);
952 ret |= get_V_div_icc(CC_SRC2);
953 return ret;
956 static uint32_t compute_C_div(void)
958 return 0;
961 /* carry = (src1[31] & src2[31]) | ( ~dst[31] & (src1[31] | src2[31])) */
962 static inline uint32_t get_C_add_icc(target_ulong dst, target_ulong src1,
963 target_ulong src2)
965 uint32_t ret = 0;
967 if (((src1 & (1ULL << 31)) & (src2 & (1ULL << 31)))
968 | ((~(dst & (1ULL << 31)))
969 & ((src1 & (1ULL << 31)) | (src2 & (1ULL << 31)))))
970 ret |= PSR_CARRY;
971 return ret;
974 static inline uint32_t get_V_add_icc(target_ulong dst, target_ulong src1,
975 target_ulong src2)
977 uint32_t ret = 0;
979 if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 31))
980 ret |= PSR_OVF;
981 return ret;
#ifdef TARGET_SPARC64
/* 64-bit unsigned carry out of an addition: dst wrapped below src1. */
static inline uint32_t get_C_add_xcc(target_ulong dst, target_ulong src1)
{
    uint32_t ret = 0;

    if (dst < src1) {
        ret |= PSR_CARRY;
    }
    return ret;
}

static inline uint32_t get_V_add_xcc(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    uint32_t ret = 0;

    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 63)) {
        ret |= PSR_OVF;
    }
    return ret;
}

static uint32_t compute_all_add_xcc(void)
{
    uint32_t ret;

    ret = get_NZ_xcc(CC_DST);
    ret |= get_C_add_xcc(CC_DST, CC_SRC);
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_add_xcc(void)
{
    return get_C_add_xcc(CC_DST, CC_SRC);
}
#endif
1020 static uint32_t compute_all_add(void)
1022 uint32_t ret;
1024 ret = get_NZ_icc(CC_DST);
1025 ret |= get_C_add_icc(CC_DST, CC_SRC, CC_SRC2);
1026 ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1027 return ret;
1030 static uint32_t compute_C_add(void)
1032 return get_C_add_icc(CC_DST, CC_SRC, CC_SRC2);
#ifdef TARGET_SPARC64
/* addx (add with carry-in): a carry out occurs if either the partial
   sum (dst - src2) or the final sum carried, hence the two checks. */
static uint32_t compute_all_addx_xcc(void)
{
    uint32_t ret;

    ret = get_NZ_xcc(CC_DST);
    ret |= get_C_add_xcc(CC_DST - CC_SRC2, CC_SRC);
    ret |= get_C_add_xcc(CC_DST, CC_SRC);
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_addx_xcc(void)
{
    uint32_t ret;

    ret = get_C_add_xcc(CC_DST - CC_SRC2, CC_SRC);
    ret |= get_C_add_xcc(CC_DST, CC_SRC);
    return ret;
}
#endif
1057 static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2)
1059 uint32_t ret = 0;
1061 if ((src1 | src2) & 0x3)
1062 ret |= PSR_OVF;
1063 return ret;
1066 static uint32_t compute_all_tadd(void)
1068 uint32_t ret;
1070 ret = get_NZ_icc(CC_DST);
1071 ret |= get_C_add_icc(CC_DST, CC_SRC, CC_SRC2);
1072 ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
1073 ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
1074 return ret;
1077 static uint32_t compute_C_tadd(void)
1079 return get_C_add_icc(CC_DST, CC_SRC, CC_SRC2);
1082 static uint32_t compute_all_taddtv(void)
1084 uint32_t ret;
1086 ret = get_NZ_icc(CC_DST);
1087 ret |= get_C_add_icc(CC_DST, CC_SRC, CC_SRC2);
1088 return ret;
1091 static uint32_t compute_C_taddtv(void)
1093 return get_C_add_icc(CC_DST, CC_SRC, CC_SRC2);
1096 /* carry = (~src1[31] & src2[31]) | ( dst[31] & (~src1[31] | src2[31])) */
1097 static inline uint32_t get_C_sub_icc(target_ulong dst, target_ulong src1,
1098 target_ulong src2)
1100 uint32_t ret = 0;
1102 if (((~(src1 & (1ULL << 31))) & (src2 & (1ULL << 31)))
1103 | ((dst & (1ULL << 31)) & (( ~(src1 & (1ULL << 31)))
1104 | (src2 & (1ULL << 31)))))
1105 ret |= PSR_CARRY;
1106 return ret;
1109 static inline uint32_t get_V_sub_icc(target_ulong dst, target_ulong src1,
1110 target_ulong src2)
1112 uint32_t ret = 0;
1114 if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 31))
1115 ret |= PSR_OVF;
1116 return ret;
#ifdef TARGET_SPARC64
/* 64-bit borrow out of a subtraction. */
static inline uint32_t get_C_sub_xcc(target_ulong src1, target_ulong src2)
{
    uint32_t ret = 0;

    if (src1 < src2) {
        ret |= PSR_CARRY;
    }
    return ret;
}

static inline uint32_t get_V_sub_xcc(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    uint32_t ret = 0;

    if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 63)) {
        ret |= PSR_OVF;
    }
    return ret;
}

static uint32_t compute_all_sub_xcc(void)
{
    uint32_t ret;

    ret = get_NZ_xcc(CC_DST);
    ret |= get_C_sub_xcc(CC_SRC, CC_SRC2);
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_sub_xcc(void)
{
    return get_C_sub_xcc(CC_SRC, CC_SRC2);
}
#endif
1156 static uint32_t compute_all_sub(void)
1158 uint32_t ret;
1160 ret = get_NZ_icc(CC_DST);
1161 ret |= get_C_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1162 ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1163 return ret;
1166 static uint32_t compute_C_sub(void)
1168 return get_C_sub_icc(CC_DST, CC_SRC, CC_SRC2);
#ifdef TARGET_SPARC64
/* subx (subtract with borrow-in): a borrow out occurs if either the
   partial difference or the final difference borrowed. */
static uint32_t compute_all_subx_xcc(void)
{
    uint32_t ret;

    ret = get_NZ_xcc(CC_DST);
    ret |= get_C_sub_xcc(CC_DST - CC_SRC2, CC_SRC);
    ret |= get_C_sub_xcc(CC_DST, CC_SRC2);
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_subx_xcc(void)
{
    uint32_t ret;

    ret = get_C_sub_xcc(CC_DST - CC_SRC2, CC_SRC);
    ret |= get_C_sub_xcc(CC_DST, CC_SRC2);
    return ret;
}
#endif
1193 static uint32_t compute_all_tsub(void)
1195 uint32_t ret;
1197 ret = get_NZ_icc(CC_DST);
1198 ret |= get_C_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1199 ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1200 ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
1201 return ret;
1204 static uint32_t compute_C_tsub(void)
1206 return get_C_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1209 static uint32_t compute_all_tsubtv(void)
1211 uint32_t ret;
1213 ret = get_NZ_icc(CC_DST);
1214 ret |= get_C_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1215 return ret;
1218 static uint32_t compute_C_tsubtv(void)
1220 return get_C_sub_icc(CC_DST, CC_SRC, CC_SRC2);
1223 static uint32_t compute_all_logic(void)
1225 return get_NZ_icc(CC_DST);
1228 static uint32_t compute_C_logic(void)
1230 return 0;
1233 #ifdef TARGET_SPARC64
1234 static uint32_t compute_all_logic_xcc(void)
1236 return get_NZ_xcc(CC_DST);
1238 #endif
1240 typedef struct CCTable {
1241 uint32_t (*compute_all)(void); /* return all the flags */
1242 uint32_t (*compute_c)(void); /* return the C flag */
1243 } CCTable;
1245 static const CCTable icc_table[CC_OP_NB] = {
1246 /* CC_OP_DYNAMIC should never happen */
1247 [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
1248 [CC_OP_DIV] = { compute_all_div, compute_C_div },
1249 [CC_OP_ADD] = { compute_all_add, compute_C_add },
1250 [CC_OP_ADDX] = { compute_all_add, compute_C_add },
1251 [CC_OP_TADD] = { compute_all_tadd, compute_C_tadd },
1252 [CC_OP_TADDTV] = { compute_all_taddtv, compute_C_taddtv },
1253 [CC_OP_SUB] = { compute_all_sub, compute_C_sub },
1254 [CC_OP_SUBX] = { compute_all_sub, compute_C_sub },
1255 [CC_OP_TSUB] = { compute_all_tsub, compute_C_tsub },
1256 [CC_OP_TSUBTV] = { compute_all_tsubtv, compute_C_tsubtv },
1257 [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic },
1260 #ifdef TARGET_SPARC64
1261 static const CCTable xcc_table[CC_OP_NB] = {
1262 /* CC_OP_DYNAMIC should never happen */
1263 [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc },
1264 [CC_OP_DIV] = { compute_all_logic_xcc, compute_C_logic },
1265 [CC_OP_ADD] = { compute_all_add_xcc, compute_C_add_xcc },
1266 [CC_OP_ADDX] = { compute_all_addx_xcc, compute_C_addx_xcc },
1267 [CC_OP_TADD] = { compute_all_add_xcc, compute_C_add_xcc },
1268 [CC_OP_TADDTV] = { compute_all_add_xcc, compute_C_add_xcc },
1269 [CC_OP_SUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
1270 [CC_OP_SUBX] = { compute_all_subx_xcc, compute_C_subx_xcc },
1271 [CC_OP_TSUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
1272 [CC_OP_TSUBTV] = { compute_all_sub_xcc, compute_C_sub_xcc },
1273 [CC_OP_LOGIC] = { compute_all_logic_xcc, compute_C_logic },
1275 #endif
1277 void helper_compute_psr(void)
1279 uint32_t new_psr;
1281 new_psr = icc_table[CC_OP].compute_all();
1282 env->psr = new_psr;
1283 #ifdef TARGET_SPARC64
1284 new_psr = xcc_table[CC_OP].compute_all();
1285 env->xcc = new_psr;
1286 #endif
1287 CC_OP = CC_OP_FLAGS;
1290 uint32_t helper_compute_C_icc(void)
1292 uint32_t ret;
1294 ret = icc_table[CC_OP].compute_c() >> PSR_CARRY_SHIFT;
1295 return ret;
#ifdef TARGET_SPARC64
/* sparc64 adds three more FCC fields; FS gives each field's bit offset. */
GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);

GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);

GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);

GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);

GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);

GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
#endif
#undef GEN_FCMPS
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
    defined(DEBUG_MXCC)
/* Debug dump of the SuperSPARC MXCC stream data and control registers. */
static void dump_mxcc(CPUState *env)
{
    printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccdata[0], env->mxccdata[1],
           env->mxccdata[2], env->mxccdata[3]);
    printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n"
           "          %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccregs[0], env->mxccregs[1],
           env->mxccregs[2], env->mxccregs[3],
           env->mxccregs[4], env->mxccregs[5],
           env->mxccregs[6], env->mxccregs[7]);
}
#endif
#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
    && defined(DEBUG_ASI)
/* Debug trace of one ASI access, masking the value to the access size. */
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
                     uint64_t r1)
{
    switch (size)
    {
    case 1:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xff);
        break;
    case 2:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffff);
        break;
    case 4:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffffffff);
        break;
    case 8:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
                    addr, asi, r1);
        break;
    }
}
#endif
1371 #ifndef TARGET_SPARC64
1372 #ifndef CONFIG_USER_ONLY
1373 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1375 uint64_t ret = 0;
1376 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
1377 uint32_t last_addr = addr;
1378 #endif
1380 helper_check_align(addr, size - 1);
1381 switch (asi) {
1382 case 2: /* SuperSparc MXCC registers */
1383 switch (addr) {
1384 case 0x01c00a00: /* MXCC control register */
1385 if (size == 8)
1386 ret = env->mxccregs[3];
1387 else
1388 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1389 size);
1390 break;
1391 case 0x01c00a04: /* MXCC control register */
1392 if (size == 4)
1393 ret = env->mxccregs[3];
1394 else
1395 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1396 size);
1397 break;
1398 case 0x01c00c00: /* Module reset register */
1399 if (size == 8) {
1400 ret = env->mxccregs[5];
1401 // should we do something here?
1402 } else
1403 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1404 size);
1405 break;
1406 case 0x01c00f00: /* MBus port address register */
1407 if (size == 8)
1408 ret = env->mxccregs[7];
1409 else
1410 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1411 size);
1412 break;
1413 default:
1414 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
1415 size);
1416 break;
1418 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
1419 "addr = %08x -> ret = %" PRIx64 ","
1420 "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
1421 #ifdef DEBUG_MXCC
1422 dump_mxcc(env);
1423 #endif
1424 break;
1425 case 3: /* MMU probe */
1427 int mmulev;
1429 mmulev = (addr >> 8) & 15;
1430 if (mmulev > 4)
1431 ret = 0;
1432 else
1433 ret = mmu_probe(env, addr, mmulev);
1434 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
1435 addr, mmulev, ret);
1437 break;
1438 case 4: /* read MMU regs */
1440 int reg = (addr >> 8) & 0x1f;
1442 ret = env->mmuregs[reg];
1443 if (reg == 3) /* Fault status cleared on read */
1444 env->mmuregs[3] = 0;
1445 else if (reg == 0x13) /* Fault status read */
1446 ret = env->mmuregs[3];
1447 else if (reg == 0x14) /* Fault address read */
1448 ret = env->mmuregs[4];
1449 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
1451 break;
1452 case 5: // Turbosparc ITLB Diagnostic
1453 case 6: // Turbosparc DTLB Diagnostic
1454 case 7: // Turbosparc IOTLB Diagnostic
1455 break;
1456 case 9: /* Supervisor code access */
1457 switch(size) {
1458 case 1:
1459 ret = ldub_code(addr);
1460 break;
1461 case 2:
1462 ret = lduw_code(addr);
1463 break;
1464 default:
1465 case 4:
1466 ret = ldl_code(addr);
1467 break;
1468 case 8:
1469 ret = ldq_code(addr);
1470 break;
1472 break;
1473 case 0xa: /* User data access */
1474 switch(size) {
1475 case 1:
1476 ret = ldub_user(addr);
1477 break;
1478 case 2:
1479 ret = lduw_user(addr);
1480 break;
1481 default:
1482 case 4:
1483 ret = ldl_user(addr);
1484 break;
1485 case 8:
1486 ret = ldq_user(addr);
1487 break;
1489 break;
1490 case 0xb: /* Supervisor data access */
1491 switch(size) {
1492 case 1:
1493 ret = ldub_kernel(addr);
1494 break;
1495 case 2:
1496 ret = lduw_kernel(addr);
1497 break;
1498 default:
1499 case 4:
1500 ret = ldl_kernel(addr);
1501 break;
1502 case 8:
1503 ret = ldq_kernel(addr);
1504 break;
1506 break;
1507 case 0xc: /* I-cache tag */
1508 case 0xd: /* I-cache data */
1509 case 0xe: /* D-cache tag */
1510 case 0xf: /* D-cache data */
1511 break;
1512 case 0x20: /* MMU passthrough */
1513 switch(size) {
1514 case 1:
1515 ret = ldub_phys(addr);
1516 break;
1517 case 2:
1518 ret = lduw_phys(addr);
1519 break;
1520 default:
1521 case 4:
1522 ret = ldl_phys(addr);
1523 break;
1524 case 8:
1525 ret = ldq_phys(addr);
1526 break;
1528 break;
1529 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1530 switch(size) {
1531 case 1:
1532 ret = ldub_phys((target_phys_addr_t)addr
1533 | ((target_phys_addr_t)(asi & 0xf) << 32));
1534 break;
1535 case 2:
1536 ret = lduw_phys((target_phys_addr_t)addr
1537 | ((target_phys_addr_t)(asi & 0xf) << 32));
1538 break;
1539 default:
1540 case 4:
1541 ret = ldl_phys((target_phys_addr_t)addr
1542 | ((target_phys_addr_t)(asi & 0xf) << 32));
1543 break;
1544 case 8:
1545 ret = ldq_phys((target_phys_addr_t)addr
1546 | ((target_phys_addr_t)(asi & 0xf) << 32));
1547 break;
1549 break;
1550 case 0x30: // Turbosparc secondary cache diagnostic
1551 case 0x31: // Turbosparc RAM snoop
1552 case 0x32: // Turbosparc page table descriptor diagnostic
1553 case 0x39: /* data cache diagnostic register */
1554 ret = 0;
1555 break;
1556 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
1558 int reg = (addr >> 8) & 3;
1560 switch(reg) {
1561 case 0: /* Breakpoint Value (Addr) */
1562 ret = env->mmubpregs[reg];
1563 break;
1564 case 1: /* Breakpoint Mask */
1565 ret = env->mmubpregs[reg];
1566 break;
1567 case 2: /* Breakpoint Control */
1568 ret = env->mmubpregs[reg];
1569 break;
1570 case 3: /* Breakpoint Status */
1571 ret = env->mmubpregs[reg];
1572 env->mmubpregs[reg] = 0ULL;
1573 break;
1575 DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
1576 ret);
1578 break;
1579 case 8: /* User code access, XXX */
1580 default:
1581 do_unassigned_access(addr, 0, 0, asi, size);
1582 ret = 0;
1583 break;
1585 if (sign) {
1586 switch(size) {
1587 case 1:
1588 ret = (int8_t) ret;
1589 break;
1590 case 2:
1591 ret = (int16_t) ret;
1592 break;
1593 case 4:
1594 ret = (int32_t) ret;
1595 break;
1596 default:
1597 break;
1600 #ifdef DEBUG_ASI
1601 dump_asi("read ", last_addr, asi, size, ret);
1602 #endif
1603 return ret;
1606 void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
1608 helper_check_align(addr, size - 1);
1609 switch(asi) {
1610 case 2: /* SuperSparc MXCC registers */
1611 switch (addr) {
1612 case 0x01c00000: /* MXCC stream data register 0 */
1613 if (size == 8)
1614 env->mxccdata[0] = val;
1615 else
1616 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1617 size);
1618 break;
1619 case 0x01c00008: /* MXCC stream data register 1 */
1620 if (size == 8)
1621 env->mxccdata[1] = val;
1622 else
1623 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1624 size);
1625 break;
1626 case 0x01c00010: /* MXCC stream data register 2 */
1627 if (size == 8)
1628 env->mxccdata[2] = val;
1629 else
1630 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1631 size);
1632 break;
1633 case 0x01c00018: /* MXCC stream data register 3 */
1634 if (size == 8)
1635 env->mxccdata[3] = val;
1636 else
1637 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1638 size);
1639 break;
1640 case 0x01c00100: /* MXCC stream source */
1641 if (size == 8)
1642 env->mxccregs[0] = val;
1643 else
1644 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1645 size);
1646 env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1648 env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1650 env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1651 16);
1652 env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1653 24);
1654 break;
1655 case 0x01c00200: /* MXCC stream destination */
1656 if (size == 8)
1657 env->mxccregs[1] = val;
1658 else
1659 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1660 size);
1661 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0,
1662 env->mxccdata[0]);
1663 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8,
1664 env->mxccdata[1]);
1665 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
1666 env->mxccdata[2]);
1667 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
1668 env->mxccdata[3]);
1669 break;
1670 case 0x01c00a00: /* MXCC control register */
1671 if (size == 8)
1672 env->mxccregs[3] = val;
1673 else
1674 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1675 size);
1676 break;
1677 case 0x01c00a04: /* MXCC control register */
1678 if (size == 4)
1679 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
1680 | val;
1681 else
1682 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1683 size);
1684 break;
1685 case 0x01c00e00: /* MXCC error register */
1686 // writing a 1 bit clears the error
1687 if (size == 8)
1688 env->mxccregs[6] &= ~val;
1689 else
1690 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1691 size);
1692 break;
1693 case 0x01c00f00: /* MBus port address register */
1694 if (size == 8)
1695 env->mxccregs[7] = val;
1696 else
1697 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1698 size);
1699 break;
1700 default:
1701 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
1702 size);
1703 break;
1705 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
1706 asi, size, addr, val);
1707 #ifdef DEBUG_MXCC
1708 dump_mxcc(env);
1709 #endif
1710 break;
1711 case 3: /* MMU flush */
1713 int mmulev;
1715 mmulev = (addr >> 8) & 15;
1716 DPRINTF_MMU("mmu flush level %d\n", mmulev);
1717 switch (mmulev) {
1718 case 0: // flush page
1719 tlb_flush_page(env, addr & 0xfffff000);
1720 break;
1721 case 1: // flush segment (256k)
1722 case 2: // flush region (16M)
1723 case 3: // flush context (4G)
1724 case 4: // flush entire
1725 tlb_flush(env, 1);
1726 break;
1727 default:
1728 break;
1730 #ifdef DEBUG_MMU
1731 dump_mmu(env);
1732 #endif
1734 break;
1735 case 4: /* write MMU regs */
1737 int reg = (addr >> 8) & 0x1f;
1738 uint32_t oldreg;
1740 oldreg = env->mmuregs[reg];
1741 switch(reg) {
1742 case 0: // Control Register
1743 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
1744 (val & 0x00ffffff);
1745 // Mappings generated during no-fault mode or MMU
1746 // disabled mode are invalid in normal mode
1747 if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
1748 (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
1749 tlb_flush(env, 1);
1750 break;
1751 case 1: // Context Table Pointer Register
1752 env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
1753 break;
1754 case 2: // Context Register
1755 env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
1756 if (oldreg != env->mmuregs[reg]) {
1757 /* we flush when the MMU context changes because
1758 QEMU has no MMU context support */
1759 tlb_flush(env, 1);
1761 break;
1762 case 3: // Synchronous Fault Status Register with Clear
1763 case 4: // Synchronous Fault Address Register
1764 break;
1765 case 0x10: // TLB Replacement Control Register
1766 env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
1767 break;
1768 case 0x13: // Synchronous Fault Status Register with Read and Clear
1769 env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
1770 break;
1771 case 0x14: // Synchronous Fault Address Register
1772 env->mmuregs[4] = val;
1773 break;
1774 default:
1775 env->mmuregs[reg] = val;
1776 break;
1778 if (oldreg != env->mmuregs[reg]) {
1779 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
1780 reg, oldreg, env->mmuregs[reg]);
1782 #ifdef DEBUG_MMU
1783 dump_mmu(env);
1784 #endif
1786 break;
1787 case 5: // Turbosparc ITLB Diagnostic
1788 case 6: // Turbosparc DTLB Diagnostic
1789 case 7: // Turbosparc IOTLB Diagnostic
1790 break;
1791 case 0xa: /* User data access */
1792 switch(size) {
1793 case 1:
1794 stb_user(addr, val);
1795 break;
1796 case 2:
1797 stw_user(addr, val);
1798 break;
1799 default:
1800 case 4:
1801 stl_user(addr, val);
1802 break;
1803 case 8:
1804 stq_user(addr, val);
1805 break;
1807 break;
1808 case 0xb: /* Supervisor data access */
1809 switch(size) {
1810 case 1:
1811 stb_kernel(addr, val);
1812 break;
1813 case 2:
1814 stw_kernel(addr, val);
1815 break;
1816 default:
1817 case 4:
1818 stl_kernel(addr, val);
1819 break;
1820 case 8:
1821 stq_kernel(addr, val);
1822 break;
1824 break;
1825 case 0xc: /* I-cache tag */
1826 case 0xd: /* I-cache data */
1827 case 0xe: /* D-cache tag */
1828 case 0xf: /* D-cache data */
1829 case 0x10: /* I/D-cache flush page */
1830 case 0x11: /* I/D-cache flush segment */
1831 case 0x12: /* I/D-cache flush region */
1832 case 0x13: /* I/D-cache flush context */
1833 case 0x14: /* I/D-cache flush user */
1834 break;
1835 case 0x17: /* Block copy, sta access */
1837 // val = src
1838 // addr = dst
1839 // copy 32 bytes
1840 unsigned int i;
1841 uint32_t src = val & ~3, dst = addr & ~3, temp;
1843 for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
1844 temp = ldl_kernel(src);
1845 stl_kernel(dst, temp);
1848 break;
1849 case 0x1f: /* Block fill, stda access */
1851 // addr = dst
1852 // fill 32 bytes with val
1853 unsigned int i;
1854 uint32_t dst = addr & 7;
1856 for (i = 0; i < 32; i += 8, dst += 8)
1857 stq_kernel(dst, val);
1859 break;
1860 case 0x20: /* MMU passthrough */
1862 switch(size) {
1863 case 1:
1864 stb_phys(addr, val);
1865 break;
1866 case 2:
1867 stw_phys(addr, val);
1868 break;
1869 case 4:
1870 default:
1871 stl_phys(addr, val);
1872 break;
1873 case 8:
1874 stq_phys(addr, val);
1875 break;
1878 break;
1879 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1881 switch(size) {
1882 case 1:
1883 stb_phys((target_phys_addr_t)addr
1884 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1885 break;
1886 case 2:
1887 stw_phys((target_phys_addr_t)addr
1888 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1889 break;
1890 case 4:
1891 default:
1892 stl_phys((target_phys_addr_t)addr
1893 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1894 break;
1895 case 8:
1896 stq_phys((target_phys_addr_t)addr
1897 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1898 break;
1901 break;
1902 case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
1903 case 0x31: // store buffer data, Ross RT620 I-cache flush or
1904 // Turbosparc snoop RAM
1905 case 0x32: // store buffer control or Turbosparc page table
1906 // descriptor diagnostic
1907 case 0x36: /* I-cache flash clear */
1908 case 0x37: /* D-cache flash clear */
1909 case 0x4c: /* breakpoint action */
1910 break;
1911 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
1913 int reg = (addr >> 8) & 3;
1915 switch(reg) {
1916 case 0: /* Breakpoint Value (Addr) */
1917 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1918 break;
1919 case 1: /* Breakpoint Mask */
1920 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1921 break;
1922 case 2: /* Breakpoint Control */
1923 env->mmubpregs[reg] = (val & 0x7fULL);
1924 break;
1925 case 3: /* Breakpoint Status */
1926 env->mmubpregs[reg] = (val & 0xfULL);
1927 break;
1929 DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg,
1930 env->mmuregs[reg]);
1932 break;
1933 case 8: /* User code access, XXX */
1934 case 9: /* Supervisor code access, XXX */
1935 default:
1936 do_unassigned_access(addr, 1, 0, asi, size);
1937 break;
1939 #ifdef DEBUG_ASI
1940 dump_asi("write", addr, asi, size, val);
1941 #endif
1944 #endif /* CONFIG_USER_ONLY */
1945 #else /* TARGET_SPARC64 */
1947 #ifdef CONFIG_USER_ONLY
1948 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1950 uint64_t ret = 0;
1951 #if defined(DEBUG_ASI)
1952 target_ulong last_addr = addr;
1953 #endif
1955 if (asi < 0x80)
1956 raise_exception(TT_PRIV_ACT);
1958 helper_check_align(addr, size - 1);
1959 addr = address_mask(env, addr);
1961 switch (asi) {
1962 case 0x82: // Primary no-fault
1963 case 0x8a: // Primary no-fault LE
1964 if (page_check_range(addr, size, PAGE_READ) == -1) {
1965 #ifdef DEBUG_ASI
1966 dump_asi("read ", last_addr, asi, size, ret);
1967 #endif
1968 return 0;
1970 // Fall through
1971 case 0x80: // Primary
1972 case 0x88: // Primary LE
1974 switch(size) {
1975 case 1:
1976 ret = ldub_raw(addr);
1977 break;
1978 case 2:
1979 ret = lduw_raw(addr);
1980 break;
1981 case 4:
1982 ret = ldl_raw(addr);
1983 break;
1984 default:
1985 case 8:
1986 ret = ldq_raw(addr);
1987 break;
1990 break;
1991 case 0x83: // Secondary no-fault
1992 case 0x8b: // Secondary no-fault LE
1993 if (page_check_range(addr, size, PAGE_READ) == -1) {
1994 #ifdef DEBUG_ASI
1995 dump_asi("read ", last_addr, asi, size, ret);
1996 #endif
1997 return 0;
1999 // Fall through
2000 case 0x81: // Secondary
2001 case 0x89: // Secondary LE
2002 // XXX
2003 break;
2004 default:
2005 break;
2008 /* Convert from little endian */
2009 switch (asi) {
2010 case 0x88: // Primary LE
2011 case 0x89: // Secondary LE
2012 case 0x8a: // Primary no-fault LE
2013 case 0x8b: // Secondary no-fault LE
2014 switch(size) {
2015 case 2:
2016 ret = bswap16(ret);
2017 break;
2018 case 4:
2019 ret = bswap32(ret);
2020 break;
2021 case 8:
2022 ret = bswap64(ret);
2023 break;
2024 default:
2025 break;
2027 default:
2028 break;
2031 /* Convert to signed number */
2032 if (sign) {
2033 switch(size) {
2034 case 1:
2035 ret = (int8_t) ret;
2036 break;
2037 case 2:
2038 ret = (int16_t) ret;
2039 break;
2040 case 4:
2041 ret = (int32_t) ret;
2042 break;
2043 default:
2044 break;
2047 #ifdef DEBUG_ASI
2048 dump_asi("read ", last_addr, asi, size, ret);
2049 #endif
2050 return ret;
2053 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
2055 #ifdef DEBUG_ASI
2056 dump_asi("write", addr, asi, size, val);
2057 #endif
2058 if (asi < 0x80)
2059 raise_exception(TT_PRIV_ACT);
2061 helper_check_align(addr, size - 1);
2062 addr = address_mask(env, addr);
2064 /* Convert to little endian */
2065 switch (asi) {
2066 case 0x88: // Primary LE
2067 case 0x89: // Secondary LE
2068 switch(size) {
2069 case 2:
2070 val = bswap16(val);
2071 break;
2072 case 4:
2073 val = bswap32(val);
2074 break;
2075 case 8:
2076 val = bswap64(val);
2077 break;
2078 default:
2079 break;
2081 default:
2082 break;
2085 switch(asi) {
2086 case 0x80: // Primary
2087 case 0x88: // Primary LE
2089 switch(size) {
2090 case 1:
2091 stb_raw(addr, val);
2092 break;
2093 case 2:
2094 stw_raw(addr, val);
2095 break;
2096 case 4:
2097 stl_raw(addr, val);
2098 break;
2099 case 8:
2100 default:
2101 stq_raw(addr, val);
2102 break;
2105 break;
2106 case 0x81: // Secondary
2107 case 0x89: // Secondary LE
2108 // XXX
2109 return;
2111 case 0x82: // Primary no-fault, RO
2112 case 0x83: // Secondary no-fault, RO
2113 case 0x8a: // Primary no-fault LE, RO
2114 case 0x8b: // Secondary no-fault LE, RO
2115 default:
2116 do_unassigned_access(addr, 1, 0, 1, size);
2117 return;
2121 #else /* CONFIG_USER_ONLY */
2123 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
2125 uint64_t ret = 0;
2126 #if defined(DEBUG_ASI)
2127 target_ulong last_addr = addr;
2128 #endif
2130 asi &= 0xff;
2132 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2133 || ((env->def->features & CPU_FEATURE_HYPV)
2134 && asi >= 0x30 && asi < 0x80
2135 && !(env->hpstate & HS_PRIV)))
2136 raise_exception(TT_PRIV_ACT);
2138 helper_check_align(addr, size - 1);
2139 switch (asi) {
2140 case 0x82: // Primary no-fault
2141 case 0x8a: // Primary no-fault LE
2142 case 0x83: // Secondary no-fault
2143 case 0x8b: // Secondary no-fault LE
2145 /* secondary space access has lowest asi bit equal to 1 */
2146 int access_mmu_idx = ( asi & 1 ) ? MMU_KERNEL_IDX
2147 : MMU_KERNEL_SECONDARY_IDX;
2149 if (cpu_get_phys_page_nofault(env, addr, access_mmu_idx) == -1ULL) {
2150 #ifdef DEBUG_ASI
2151 dump_asi("read ", last_addr, asi, size, ret);
2152 #endif
2153 return 0;
2156 // Fall through
2157 case 0x10: // As if user primary
2158 case 0x11: // As if user secondary
2159 case 0x18: // As if user primary LE
2160 case 0x19: // As if user secondary LE
2161 case 0x80: // Primary
2162 case 0x81: // Secondary
2163 case 0x88: // Primary LE
2164 case 0x89: // Secondary LE
2165 case 0xe2: // UA2007 Primary block init
2166 case 0xe3: // UA2007 Secondary block init
2167 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
2168 if ((env->def->features & CPU_FEATURE_HYPV)
2169 && env->hpstate & HS_PRIV) {
2170 switch(size) {
2171 case 1:
2172 ret = ldub_hypv(addr);
2173 break;
2174 case 2:
2175 ret = lduw_hypv(addr);
2176 break;
2177 case 4:
2178 ret = ldl_hypv(addr);
2179 break;
2180 default:
2181 case 8:
2182 ret = ldq_hypv(addr);
2183 break;
2185 } else {
2186 /* secondary space access has lowest asi bit equal to 1 */
2187 if (asi & 1) {
2188 switch(size) {
2189 case 1:
2190 ret = ldub_kernel_secondary(addr);
2191 break;
2192 case 2:
2193 ret = lduw_kernel_secondary(addr);
2194 break;
2195 case 4:
2196 ret = ldl_kernel_secondary(addr);
2197 break;
2198 default:
2199 case 8:
2200 ret = ldq_kernel_secondary(addr);
2201 break;
2203 } else {
2204 switch(size) {
2205 case 1:
2206 ret = ldub_kernel(addr);
2207 break;
2208 case 2:
2209 ret = lduw_kernel(addr);
2210 break;
2211 case 4:
2212 ret = ldl_kernel(addr);
2213 break;
2214 default:
2215 case 8:
2216 ret = ldq_kernel(addr);
2217 break;
2221 } else {
2222 /* secondary space access has lowest asi bit equal to 1 */
2223 if (asi & 1) {
2224 switch(size) {
2225 case 1:
2226 ret = ldub_user_secondary(addr);
2227 break;
2228 case 2:
2229 ret = lduw_user_secondary(addr);
2230 break;
2231 case 4:
2232 ret = ldl_user_secondary(addr);
2233 break;
2234 default:
2235 case 8:
2236 ret = ldq_user_secondary(addr);
2237 break;
2239 } else {
2240 switch(size) {
2241 case 1:
2242 ret = ldub_user(addr);
2243 break;
2244 case 2:
2245 ret = lduw_user(addr);
2246 break;
2247 case 4:
2248 ret = ldl_user(addr);
2249 break;
2250 default:
2251 case 8:
2252 ret = ldq_user(addr);
2253 break;
2257 break;
2258 case 0x14: // Bypass
2259 case 0x15: // Bypass, non-cacheable
2260 case 0x1c: // Bypass LE
2261 case 0x1d: // Bypass, non-cacheable LE
2263 switch(size) {
2264 case 1:
2265 ret = ldub_phys(addr);
2266 break;
2267 case 2:
2268 ret = lduw_phys(addr);
2269 break;
2270 case 4:
2271 ret = ldl_phys(addr);
2272 break;
2273 default:
2274 case 8:
2275 ret = ldq_phys(addr);
2276 break;
2278 break;
2280 case 0x24: // Nucleus quad LDD 128 bit atomic
2281 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
2282 // Only ldda allowed
2283 raise_exception(TT_ILL_INSN);
2284 return 0;
2285 case 0x04: // Nucleus
2286 case 0x0c: // Nucleus Little Endian (LE)
2288 switch(size) {
2289 case 1:
2290 ret = ldub_nucleus(addr);
2291 break;
2292 case 2:
2293 ret = lduw_nucleus(addr);
2294 break;
2295 case 4:
2296 ret = ldl_nucleus(addr);
2297 break;
2298 default:
2299 case 8:
2300 ret = ldq_nucleus(addr);
2301 break;
2303 break;
2305 case 0x4a: // UPA config
2306 // XXX
2307 break;
2308 case 0x45: // LSU
2309 ret = env->lsu;
2310 break;
2311 case 0x50: // I-MMU regs
2313 int reg = (addr >> 3) & 0xf;
2315 if (reg == 0) {
2316 // I-TSB Tag Target register
2317 ret = ultrasparc_tag_target(env->immu.tag_access);
2318 } else {
2319 ret = env->immuregs[reg];
2322 break;
2324 case 0x51: // I-MMU 8k TSB pointer
2326 // env->immuregs[5] holds I-MMU TSB register value
2327 // env->immuregs[6] holds I-MMU Tag Access register value
2328 ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
2329 8*1024);
2330 break;
2332 case 0x52: // I-MMU 64k TSB pointer
2334 // env->immuregs[5] holds I-MMU TSB register value
2335 // env->immuregs[6] holds I-MMU Tag Access register value
2336 ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
2337 64*1024);
2338 break;
2340 case 0x55: // I-MMU data access
2342 int reg = (addr >> 3) & 0x3f;
2344 ret = env->itlb[reg].tte;
2345 break;
2347 case 0x56: // I-MMU tag read
2349 int reg = (addr >> 3) & 0x3f;
2351 ret = env->itlb[reg].tag;
2352 break;
2354 case 0x58: // D-MMU regs
2356 int reg = (addr >> 3) & 0xf;
2358 if (reg == 0) {
2359 // D-TSB Tag Target register
2360 ret = ultrasparc_tag_target(env->dmmu.tag_access);
2361 } else {
2362 ret = env->dmmuregs[reg];
2364 break;
2366 case 0x59: // D-MMU 8k TSB pointer
2368 // env->dmmuregs[5] holds D-MMU TSB register value
2369 // env->dmmuregs[6] holds D-MMU Tag Access register value
2370 ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
2371 8*1024);
2372 break;
2374 case 0x5a: // D-MMU 64k TSB pointer
2376 // env->dmmuregs[5] holds D-MMU TSB register value
2377 // env->dmmuregs[6] holds D-MMU Tag Access register value
2378 ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
2379 64*1024);
2380 break;
2382 case 0x5d: // D-MMU data access
2384 int reg = (addr >> 3) & 0x3f;
2386 ret = env->dtlb[reg].tte;
2387 break;
2389 case 0x5e: // D-MMU tag read
2391 int reg = (addr >> 3) & 0x3f;
2393 ret = env->dtlb[reg].tag;
2394 break;
2396 case 0x46: // D-cache data
2397 case 0x47: // D-cache tag access
2398 case 0x4b: // E-cache error enable
2399 case 0x4c: // E-cache asynchronous fault status
2400 case 0x4d: // E-cache asynchronous fault address
2401 case 0x4e: // E-cache tag data
2402 case 0x66: // I-cache instruction access
2403 case 0x67: // I-cache tag access
2404 case 0x6e: // I-cache predecode
2405 case 0x6f: // I-cache LRU etc.
2406 case 0x76: // E-cache tag
2407 case 0x7e: // E-cache tag
2408 break;
2409 case 0x5b: // D-MMU data pointer
2410 case 0x48: // Interrupt dispatch, RO
2411 case 0x49: // Interrupt data receive
2412 case 0x7f: // Incoming interrupt vector, RO
2413 // XXX
2414 break;
2415 case 0x54: // I-MMU data in, WO
2416 case 0x57: // I-MMU demap, WO
2417 case 0x5c: // D-MMU data in, WO
2418 case 0x5f: // D-MMU demap, WO
2419 case 0x77: // Interrupt vector, WO
2420 default:
2421 do_unassigned_access(addr, 0, 0, 1, size);
2422 ret = 0;
2423 break;
2426 /* Convert from little endian */
2427 switch (asi) {
2428 case 0x0c: // Nucleus Little Endian (LE)
2429 case 0x18: // As if user primary LE
2430 case 0x19: // As if user secondary LE
2431 case 0x1c: // Bypass LE
2432 case 0x1d: // Bypass, non-cacheable LE
2433 case 0x88: // Primary LE
2434 case 0x89: // Secondary LE
2435 case 0x8a: // Primary no-fault LE
2436 case 0x8b: // Secondary no-fault LE
2437 switch(size) {
2438 case 2:
2439 ret = bswap16(ret);
2440 break;
2441 case 4:
2442 ret = bswap32(ret);
2443 break;
2444 case 8:
2445 ret = bswap64(ret);
2446 break;
2447 default:
2448 break;
2450 default:
2451 break;
2454 /* Convert to signed number */
2455 if (sign) {
2456 switch(size) {
2457 case 1:
2458 ret = (int8_t) ret;
2459 break;
2460 case 2:
2461 ret = (int16_t) ret;
2462 break;
2463 case 4:
2464 ret = (int32_t) ret;
2465 break;
2466 default:
2467 break;
2470 #ifdef DEBUG_ASI
2471 dump_asi("read ", last_addr, asi, size, ret);
2472 #endif
2473 return ret;
2476 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
2478 #ifdef DEBUG_ASI
2479 dump_asi("write", addr, asi, size, val);
2480 #endif
2482 asi &= 0xff;
2484 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2485 || ((env->def->features & CPU_FEATURE_HYPV)
2486 && asi >= 0x30 && asi < 0x80
2487 && !(env->hpstate & HS_PRIV)))
2488 raise_exception(TT_PRIV_ACT);
2490 helper_check_align(addr, size - 1);
2491 /* Convert to little endian */
2492 switch (asi) {
2493 case 0x0c: // Nucleus Little Endian (LE)
2494 case 0x18: // As if user primary LE
2495 case 0x19: // As if user secondary LE
2496 case 0x1c: // Bypass LE
2497 case 0x1d: // Bypass, non-cacheable LE
2498 case 0x88: // Primary LE
2499 case 0x89: // Secondary LE
2500 switch(size) {
2501 case 2:
2502 val = bswap16(val);
2503 break;
2504 case 4:
2505 val = bswap32(val);
2506 break;
2507 case 8:
2508 val = bswap64(val);
2509 break;
2510 default:
2511 break;
2513 default:
2514 break;
2517 switch(asi) {
2518 case 0x10: // As if user primary
2519 case 0x11: // As if user secondary
2520 case 0x18: // As if user primary LE
2521 case 0x19: // As if user secondary LE
2522 case 0x80: // Primary
2523 case 0x81: // Secondary
2524 case 0x88: // Primary LE
2525 case 0x89: // Secondary LE
2526 case 0xe2: // UA2007 Primary block init
2527 case 0xe3: // UA2007 Secondary block init
2528 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
2529 if ((env->def->features & CPU_FEATURE_HYPV)
2530 && env->hpstate & HS_PRIV) {
2531 switch(size) {
2532 case 1:
2533 stb_hypv(addr, val);
2534 break;
2535 case 2:
2536 stw_hypv(addr, val);
2537 break;
2538 case 4:
2539 stl_hypv(addr, val);
2540 break;
2541 case 8:
2542 default:
2543 stq_hypv(addr, val);
2544 break;
2546 } else {
2547 /* secondary space access has lowest asi bit equal to 1 */
2548 if (asi & 1) {
2549 switch(size) {
2550 case 1:
2551 stb_kernel_secondary(addr, val);
2552 break;
2553 case 2:
2554 stw_kernel_secondary(addr, val);
2555 break;
2556 case 4:
2557 stl_kernel_secondary(addr, val);
2558 break;
2559 case 8:
2560 default:
2561 stq_kernel_secondary(addr, val);
2562 break;
2564 } else {
2565 switch(size) {
2566 case 1:
2567 stb_kernel(addr, val);
2568 break;
2569 case 2:
2570 stw_kernel(addr, val);
2571 break;
2572 case 4:
2573 stl_kernel(addr, val);
2574 break;
2575 case 8:
2576 default:
2577 stq_kernel(addr, val);
2578 break;
2582 } else {
2583 /* secondary space access has lowest asi bit equal to 1 */
2584 if (asi & 1) {
2585 switch(size) {
2586 case 1:
2587 stb_user_secondary(addr, val);
2588 break;
2589 case 2:
2590 stw_user_secondary(addr, val);
2591 break;
2592 case 4:
2593 stl_user_secondary(addr, val);
2594 break;
2595 case 8:
2596 default:
2597 stq_user_secondary(addr, val);
2598 break;
2600 } else {
2601 switch(size) {
2602 case 1:
2603 stb_user(addr, val);
2604 break;
2605 case 2:
2606 stw_user(addr, val);
2607 break;
2608 case 4:
2609 stl_user(addr, val);
2610 break;
2611 case 8:
2612 default:
2613 stq_user(addr, val);
2614 break;
2618 break;
2619 case 0x14: // Bypass
2620 case 0x15: // Bypass, non-cacheable
2621 case 0x1c: // Bypass LE
2622 case 0x1d: // Bypass, non-cacheable LE
2624 switch(size) {
2625 case 1:
2626 stb_phys(addr, val);
2627 break;
2628 case 2:
2629 stw_phys(addr, val);
2630 break;
2631 case 4:
2632 stl_phys(addr, val);
2633 break;
2634 case 8:
2635 default:
2636 stq_phys(addr, val);
2637 break;
2640 return;
2641 case 0x24: // Nucleus quad LDD 128 bit atomic
2642 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
2643 // Only ldda allowed
2644 raise_exception(TT_ILL_INSN);
2645 return;
2646 case 0x04: // Nucleus
2647 case 0x0c: // Nucleus Little Endian (LE)
2649 switch(size) {
2650 case 1:
2651 stb_nucleus(addr, val);
2652 break;
2653 case 2:
2654 stw_nucleus(addr, val);
2655 break;
2656 case 4:
2657 stl_nucleus(addr, val);
2658 break;
2659 default:
2660 case 8:
2661 stq_nucleus(addr, val);
2662 break;
2664 break;
2667 case 0x4a: // UPA config
2668 // XXX
2669 return;
2670 case 0x45: // LSU
2672 uint64_t oldreg;
2674 oldreg = env->lsu;
2675 env->lsu = val & (DMMU_E | IMMU_E);
2676 // Mappings generated during D/I MMU disabled mode are
2677 // invalid in normal mode
2678 if (oldreg != env->lsu) {
2679 DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
2680 oldreg, env->lsu);
2681 #ifdef DEBUG_MMU
2682 dump_mmu(env);
2683 #endif
2684 tlb_flush(env, 1);
2686 return;
2688 case 0x50: // I-MMU regs
2690 int reg = (addr >> 3) & 0xf;
2691 uint64_t oldreg;
2693 oldreg = env->immuregs[reg];
2694 switch(reg) {
2695 case 0: // RO
2696 return;
2697 case 1: // Not in I-MMU
2698 case 2:
2699 return;
2700 case 3: // SFSR
2701 if ((val & 1) == 0)
2702 val = 0; // Clear SFSR
2703 env->immu.sfsr = val;
2704 break;
2705 case 4: // RO
2706 return;
2707 case 5: // TSB access
2708 DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
2709 PRIx64 "\n", env->immu.tsb, val);
2710 env->immu.tsb = val;
2711 break;
2712 case 6: // Tag access
2713 env->immu.tag_access = val;
2714 break;
2715 case 7:
2716 case 8:
2717 return;
2718 default:
2719 break;
2722 if (oldreg != env->immuregs[reg]) {
2723 DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
2724 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
2726 #ifdef DEBUG_MMU
2727 dump_mmu(env);
2728 #endif
2729 return;
2731 case 0x54: // I-MMU data in
2732 replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
2733 return;
2734 case 0x55: // I-MMU data access
2736 // TODO: auto demap
2738 unsigned int i = (addr >> 3) & 0x3f;
2740 replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);
2742 #ifdef DEBUG_MMU
2743 DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
2744 dump_mmu(env);
2745 #endif
2746 return;
2748 case 0x57: // I-MMU demap
2749 demap_tlb(env->itlb, val, "immu", env);
2750 return;
2751 case 0x58: // D-MMU regs
2753 int reg = (addr >> 3) & 0xf;
2754 uint64_t oldreg;
2756 oldreg = env->dmmuregs[reg];
2757 switch(reg) {
2758 case 0: // RO
2759 case 4:
2760 return;
2761 case 3: // SFSR
2762 if ((val & 1) == 0) {
2763 val = 0; // Clear SFSR, Fault address
2764 env->dmmu.sfar = 0;
2766 env->dmmu.sfsr = val;
2767 break;
2768 case 1: // Primary context
2769 env->dmmu.mmu_primary_context = val;
2770 break;
2771 case 2: // Secondary context
2772 env->dmmu.mmu_secondary_context = val;
2773 break;
2774 case 5: // TSB access
2775 DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
2776 PRIx64 "\n", env->dmmu.tsb, val);
2777 env->dmmu.tsb = val;
2778 break;
2779 case 6: // Tag access
2780 env->dmmu.tag_access = val;
2781 break;
2782 case 7: // Virtual Watchpoint
2783 case 8: // Physical Watchpoint
2784 default:
2785 env->dmmuregs[reg] = val;
2786 break;
2789 if (oldreg != env->dmmuregs[reg]) {
2790 DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
2791 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
2793 #ifdef DEBUG_MMU
2794 dump_mmu(env);
2795 #endif
2796 return;
2798 case 0x5c: // D-MMU data in
2799 replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
2800 return;
2801 case 0x5d: // D-MMU data access
2803 unsigned int i = (addr >> 3) & 0x3f;
2805 replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);
2807 #ifdef DEBUG_MMU
2808 DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
2809 dump_mmu(env);
2810 #endif
2811 return;
2813 case 0x5f: // D-MMU demap
2814 demap_tlb(env->dtlb, val, "dmmu", env);
2815 return;
2816 case 0x49: // Interrupt data receive
2817 // XXX
2818 return;
2819 case 0x46: // D-cache data
2820 case 0x47: // D-cache tag access
2821 case 0x4b: // E-cache error enable
2822 case 0x4c: // E-cache asynchronous fault status
2823 case 0x4d: // E-cache asynchronous fault address
2824 case 0x4e: // E-cache tag data
2825 case 0x66: // I-cache instruction access
2826 case 0x67: // I-cache tag access
2827 case 0x6e: // I-cache predecode
2828 case 0x6f: // I-cache LRU etc.
2829 case 0x76: // E-cache tag
2830 case 0x7e: // E-cache tag
2831 return;
2832 case 0x51: // I-MMU 8k TSB pointer, RO
2833 case 0x52: // I-MMU 64k TSB pointer, RO
2834 case 0x56: // I-MMU tag read, RO
2835 case 0x59: // D-MMU 8k TSB pointer, RO
2836 case 0x5a: // D-MMU 64k TSB pointer, RO
2837 case 0x5b: // D-MMU data pointer, RO
2838 case 0x5e: // D-MMU tag read, RO
2839 case 0x48: // Interrupt dispatch, RO
2840 case 0x7f: // Incoming interrupt vector, RO
2841 case 0x82: // Primary no-fault, RO
2842 case 0x83: // Secondary no-fault, RO
2843 case 0x8a: // Primary no-fault LE, RO
2844 case 0x8b: // Secondary no-fault LE, RO
2845 default:
2846 do_unassigned_access(addr, 1, 0, 1, size);
2847 return;
2850 #endif /* CONFIG_USER_ONLY */
/* LDDA: 64-bit (or 2x32-bit) load-twin with an explicit ASI.
   rd selects the destination register pair: %g1 pair (rd==0), other
   globals (rd<8), or window registers via regwptr (rd>=8).
   Raises TT_PRIV_ACT for privileged ASIs used without PS_PRIV (and,
   on HYPV CPUs, for hyperprivileged ASIs 0x30-0x7f without HS_PRIV). */
void helper_ldda_asi(target_ulong addr, int asi, int rd)
{
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || ((env->def->features & CPU_FEATURE_HYPV)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    switch (asi) {
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        /* 128-bit atomic quad load: two adjacent 64-bit kernel loads,
           byte-swapped for the LE variant (0x2c). */
        helper_check_align(addr, 0xf);
        if (rd == 0) {
            /* rd==0: %g0 is hardwired zero, only the odd register is written */
            env->gregs[1] = ldq_kernel(addr + 8);
            if (asi == 0x2c)
                bswap64s(&env->gregs[1]);
        } else if (rd < 8) {
            env->gregs[rd] = ldq_kernel(addr);
            env->gregs[rd + 1] = ldq_kernel(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->gregs[rd]);
                bswap64s(&env->gregs[rd + 1]);
            }
        } else {
            env->regwptr[rd] = ldq_kernel(addr);
            env->regwptr[rd + 1] = ldq_kernel(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->regwptr[rd]);
                bswap64s(&env->regwptr[rd + 1]);
            }
        }
        break;
    default:
        /* Ordinary LDDA: two 32-bit loads through the generic ASI path */
        helper_check_align(addr, 0x3);
        if (rd == 0)
            env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
        else if (rd < 8) {
            env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        } else {
            env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        }
        break;
    }
}
/* Floating-point load with explicit ASI.
   Block-load ASIs (0xf0/0xf1/0xf8/0xf9) transfer 64 bytes into 16
   consecutive f-registers (rd must be 8-aligned, addr 64-byte aligned);
   the LE bit of the ASI is masked off (asi & 0x8f) and endianness is
   handled by the underlying helper_ld_asi path.
   Otherwise a single value of `size` bytes is loaded into fpr[rd]/DT0. */
void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val;

    helper_check_align(addr, 3);
    switch (asi) {
    case 0xf0: // Block load primary
    case 0xf1: // Block load secondary
    case 0xf8: // Block load primary LE
    case 0xf9: // Block load secondary LE
        if (rd & 7) {
            /* block loads require an 8-aligned f-register number */
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        for (i = 0; i < 16; i++) {
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
                                                         0);
            addr += 4;
        }

        return;
    default:
        break;
    }

    val = helper_ld_asi(addr, asi, size, 0);
    switch(size) {
    default:
    case 4:
        *((uint32_t *)&env->fpr[rd]) = val;
        break;
    case 8:
        *((int64_t *)&DT0) = val;
        break;
    case 16:
        // XXX 128-bit FP loads not implemented
        break;
    }
}
/* Floating-point store with explicit ASI; mirror of helper_ldf_asi.
   Block-store ASIs (incl. the UA2007 commit variants 0xe0/0xe1) write 64
   bytes from 16 consecutive f-registers; otherwise one value of `size`
   bytes is stored from fpr[rd]/DT0 through the generic ASI path. */
void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val = 0;

    helper_check_align(addr, 3);
    switch (asi) {
    case 0xe0: // UA2007 Block commit store primary (cache flush)
    case 0xe1: // UA2007 Block commit store secondary (cache flush)
    case 0xf0: // Block store primary
    case 0xf1: // Block store secondary
    case 0xf8: // Block store primary LE
    case 0xf9: // Block store secondary LE
        if (rd & 7) {
            /* block stores require an 8-aligned f-register number */
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        for (i = 0; i < 16; i++) {
            val = *(uint32_t *)&env->fpr[rd++];
            helper_st_asi(addr, val, asi & 0x8f, 4);
            addr += 4;
        }

        return;
    default:
        break;
    }

    switch(size) {
    default:
    case 4:
        val = *((uint32_t *)&env->fpr[rd]);
        break;
    case 8:
        val = *((int64_t *)&DT0);
        break;
    case 16:
        // XXX 128-bit FP stores not implemented
        break;
    }
    helper_st_asi(addr, val, asi, size);
}
2985 target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
2986 target_ulong val2, uint32_t asi)
2988 target_ulong ret;
2990 val2 &= 0xffffffffUL;
2991 ret = helper_ld_asi(addr, asi, 4, 0);
2992 ret &= 0xffffffffUL;
2993 if (val2 == ret)
2994 helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4);
2995 return ret;
2998 target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
2999 target_ulong val2, uint32_t asi)
3001 target_ulong ret;
3003 ret = helper_ld_asi(addr, asi, 8, 0);
3004 if (val2 == ret)
3005 helper_st_asi(addr, val1, asi, 8);
3006 return ret;
3008 #endif /* TARGET_SPARC64 */
3010 #ifndef TARGET_SPARC64
3011 void helper_rett(void)
3013 unsigned int cwp;
3015 if (env->psret == 1)
3016 raise_exception(TT_ILL_INSN);
3018 env->psret = 1;
3019 cwp = cpu_cwp_inc(env, env->cwp + 1) ;
3020 if (env->wim & (1 << cwp)) {
3021 raise_exception(TT_WIN_UNF);
3023 set_cwp(cwp);
3024 env->psrs = env->psrps;
3026 #endif
3028 target_ulong helper_udiv(target_ulong a, target_ulong b)
3030 uint64_t x0;
3031 uint32_t x1;
3033 x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
3034 x1 = b;
3036 if (x1 == 0) {
3037 raise_exception(TT_DIV_ZERO);
3040 x0 = x0 / x1;
3041 if (x0 > 0xffffffff) {
3042 env->cc_src2 = 1;
3043 return 0xffffffff;
3044 } else {
3045 env->cc_src2 = 0;
3046 return x0;
3050 target_ulong helper_sdiv(target_ulong a, target_ulong b)
3052 int64_t x0;
3053 int32_t x1;
3055 x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
3056 x1 = b;
3058 if (x1 == 0) {
3059 raise_exception(TT_DIV_ZERO);
3062 x0 = x0 / x1;
3063 if ((int32_t) x0 != x0) {
3064 env->cc_src2 = 1;
3065 return x0 < 0? 0x80000000: 0x7fffffff;
3066 } else {
3067 env->cc_src2 = 0;
3068 return x0;
/* STDF: store double-precision FP register (DT0) to memory.
   mem_idx selects the softmmu access mode (0=user, 1=kernel,
   2=hypervisor on sparc64); user-only builds store through the flat
   address space after address masking. */
void helper_stdf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        stfq_user(addr, DT0);
        break;
    case 1:
        stfq_kernel(addr, DT0);
        break;
#ifdef TARGET_SPARC64
    case 2:
        stfq_hypv(addr, DT0);
        break;
#endif
    default:
        /* unknown mmu index: silently ignored */
        break;
    }
#else
    stfq_raw(address_mask(env, addr), DT0);
#endif
}
/* LDDF: load a double-precision FP value into DT0.
   mem_idx selects the softmmu access mode (0=user, 1=kernel,
   2=hypervisor on sparc64); user-only builds load through the flat
   address space after address masking. */
void helper_lddf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        DT0 = ldfq_user(addr);
        break;
    case 1:
        DT0 = ldfq_kernel(addr);
        break;
#ifdef TARGET_SPARC64
    case 2:
        DT0 = ldfq_hypv(addr);
        break;
#endif
    default:
        /* unknown mmu index: DT0 left unchanged */
        break;
    }
#else
    DT0 = ldfq_raw(address_mask(env, addr));
#endif
}
/* LDQF: load a quad-precision FP value into QT0 as two 64-bit halves.
   Note the alignment check is only to 8 bytes (see XXX: no true single
   128-bit access is performed). */
void helper_ldqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit load
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        u.ll.upper = ldq_user(addr);
        u.ll.lower = ldq_user(addr + 8);
        QT0 = u.q;
        break;
    case 1:
        u.ll.upper = ldq_kernel(addr);
        u.ll.lower = ldq_kernel(addr + 8);
        QT0 = u.q;
        break;
#ifdef TARGET_SPARC64
    case 2:
        u.ll.upper = ldq_hypv(addr);
        u.ll.lower = ldq_hypv(addr + 8);
        QT0 = u.q;
        break;
#endif
    default:
        /* unknown mmu index: QT0 left unchanged */
        break;
    }
#else
    u.ll.upper = ldq_raw(address_mask(env, addr));
    u.ll.lower = ldq_raw(address_mask(env, addr + 8));
    QT0 = u.q;
#endif
}
/* STQF: store quad-precision QT0 to memory as two 64-bit halves.
   Mirror of helper_ldqf; same 8-byte-only alignment caveat. */
void helper_stqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit store
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        u.q = QT0;
        stq_user(addr, u.ll.upper);
        stq_user(addr + 8, u.ll.lower);
        break;
    case 1:
        u.q = QT0;
        stq_kernel(addr, u.ll.upper);
        stq_kernel(addr + 8, u.ll.lower);
        break;
#ifdef TARGET_SPARC64
    case 2:
        u.q = QT0;
        stq_hypv(addr, u.ll.upper);
        stq_hypv(addr + 8, u.ll.lower);
        break;
#endif
    default:
        /* unknown mmu index: store silently dropped */
        break;
    }
#else
    u.q = QT0;
    stq_raw(address_mask(env, addr), u.ll.upper);
    stq_raw(address_mask(env, addr + 8), u.ll.lower);
#endif
}
3190 static inline void set_fsr(void)
3192 int rnd_mode;
3194 switch (env->fsr & FSR_RD_MASK) {
3195 case FSR_RD_NEAREST:
3196 rnd_mode = float_round_nearest_even;
3197 break;
3198 default:
3199 case FSR_RD_ZERO:
3200 rnd_mode = float_round_to_zero;
3201 break;
3202 case FSR_RD_POS:
3203 rnd_mode = float_round_up;
3204 break;
3205 case FSR_RD_NEG:
3206 rnd_mode = float_round_down;
3207 break;
3209 set_float_rounding_mode(rnd_mode, &env->fp_status);
3212 void helper_ldfsr(uint32_t new_fsr)
3214 env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK);
3215 set_fsr();
3218 #ifdef TARGET_SPARC64
3219 void helper_ldxfsr(uint64_t new_fsr)
3221 env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK);
3222 set_fsr();
3224 #endif
/* Raise a debug exception (breakpoint/watchpoint) and exit the CPU loop. */
void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}
3232 #ifndef TARGET_SPARC64
3233 /* XXX: use another pointer for %iN registers to avoid slow wrapping
3234 handling ? */
3235 void helper_save(void)
3237 uint32_t cwp;
3239 cwp = cpu_cwp_dec(env, env->cwp - 1);
3240 if (env->wim & (1 << cwp)) {
3241 raise_exception(TT_WIN_OVF);
3243 set_cwp(cwp);
3246 void helper_restore(void)
3248 uint32_t cwp;
3250 cwp = cpu_cwp_inc(env, env->cwp + 1);
3251 if (env->wim & (1 << cwp)) {
3252 raise_exception(TT_WIN_UNF);
3254 set_cwp(cwp);
3257 void helper_wrpsr(target_ulong new_psr)
3259 if ((new_psr & PSR_CWP) >= env->nwindows)
3260 raise_exception(TT_ILL_INSN);
3261 else
3262 PUT_PSR(env, new_psr);
/* RDPSR: assemble and return the current Processor State Register. */
target_ulong helper_rdpsr(void)
{
    return GET_PSR(env);
}
3270 #else
3271 /* XXX: use another pointer for %iN registers to avoid slow wrapping
3272 handling ? */
/* SAVE (SPARC V9): with no savable windows raise a spill trap whose
   vector encodes WSTATE (OTHER vs NORMAL depending on otherwin); with
   no clean windows raise clean_window; otherwise rotate backward and
   update the window-accounting registers. */
void helper_save(void)
{
    uint32_t cwp;

    cwp = cpu_cwp_dec(env, env->cwp - 1);
    if (env->cansave == 0) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    } else {
        if (env->cleanwin - env->canrestore == 0) {
            // XXX Clean windows without trap
            raise_exception(TT_CLRWIN);
        } else {
            env->cansave--;
            env->canrestore++;
            set_cwp(cwp);
        }
    }
}
/* RESTORE (SPARC V9): with no restorable windows raise a fill trap whose
   vector encodes WSTATE; otherwise rotate forward and update the
   window-accounting registers. */
void helper_restore(void)
{
    uint32_t cwp;

    cwp = cpu_cwp_inc(env, env->cwp + 1);
    if (env->canrestore == 0) {
        raise_exception(TT_FILL | (env->otherwin != 0 ?
                                   (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                   ((env->wstate & 0x7) << 2)));
    } else {
        env->cansave++;
        env->canrestore--;
        set_cwp(cwp);
    }
}
/* FLUSHW: if any window besides the current one holds valid contents
   (cansave != nwindows-2), trigger a spill trap so software flushes it. */
void helper_flushw(void)
{
    if (env->cansave != env->nwindows - 2) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    }
}
3319 void helper_saved(void)
3321 env->cansave++;
3322 if (env->otherwin == 0)
3323 env->canrestore--;
3324 else
3325 env->otherwin--;
3328 void helper_restored(void)
3330 env->canrestore++;
3331 if (env->cleanwin < env->nwindows - 1)
3332 env->cleanwin++;
3333 if (env->otherwin == 0)
3334 env->cansave--;
3335 else
3336 env->otherwin--;
/* RDCCR: read the V9 condition-code register. */
target_ulong helper_rdccr(void)
{
    return GET_CCR(env);
}
/* WRCCR: write the V9 condition-code register. */
void helper_wrccr(target_ulong new_ccr)
{
    PUT_CCR(env, new_ccr);
}
// CWP handling is reversed in V9, but we still use the V8 register
// order internally; GET_CWP64 converts on the way out.
target_ulong helper_rdcwp(void)
{
    return GET_CWP64(env);
}
/* WRCWP: write CWP, converting from V9 to internal V8 ordering. */
void helper_wrcwp(target_ulong new_cwp)
{
    PUT_CWP64(env, new_cwp);
}
// This macro uses non-native bit order: bit 0 is the MSB of a 64-bit
// value, so FROM/TO count down from the top (big-endian bit numbering).
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))

// This macro uses the order in the manuals, i.e. bit 0 is 2^0
// (little-endian bit numbering); implemented by reflecting into GET_FIELD.
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 63 - (TO), 63 - (FROM))
/* VIS ARRAY8: rearrange the packed 3-D fixed-point coordinates in
   pixel_addr into a blocked memory address; cubesize scales the block
   dimension.  NOTE(review): the exact bit layout below is taken verbatim
   from the implementation — confirm against the UltraSPARC VIS manual
   before modifying any field position. */
target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
{
    return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
        (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
        (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
        (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
        (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
        (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
        (((pixel_addr >> 55) & 1) << 4) |
        (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
        GET_FIELD_SP(pixel_addr, 11, 12);
}
3382 target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
3384 uint64_t tmp;
3386 tmp = addr + offset;
3387 env->gsr &= ~7ULL;
3388 env->gsr |= tmp & 7ULL;
3389 return tmp & ~7ULL;
/* POPC: population count of a 64-bit value. */
target_ulong helper_popc(target_ulong val)
{
    return ctpop64(val);
}
/* Return the backing storage for the global-register bank selected by
   the PSTATE AG/MG/IG bits.  pstate must already be masked to those
   bits; an unexpected combination logs an error and deliberately falls
   through to the normal-globals storage (bgregs). */
static inline uint64_t *get_gregset(uint32_t pstate)
{
    switch (pstate) {
    default:
        DPRINTF_PSTATE("ERROR in get_gregset: active pstate bits=%x%s%s%s\n",
                       pstate,
                       (pstate & PS_IG) ? " IG" : "",
                       (pstate & PS_MG) ? " MG" : "",
                       (pstate & PS_AG) ? " AG" : "");
        /* pass through to normal set of global registers */
    case 0:
        return env->bgregs;
    case PS_AG:
        return env->agregs;
    case PS_MG:
        return env->mgregs;
    case PS_IG:
        return env->igregs;
    }
}
/* Install a new PSTATE value.  If the global-register-bank selection
   bits (AG/IG/MG, mask 0xc01) change, swap the active bank: the current
   gregs are saved into the old bank's backing store and the new bank's
   contents are copied in.  CPUs with the GL feature have no AG bit. */
static inline void change_pstate(uint32_t new_pstate)
{
    uint32_t pstate_regs, new_pstate_regs;
    uint64_t *src, *dst;

    if (env->def->features & CPU_FEATURE_GL) {
        // PS_AG is not implemented in this case
        new_pstate &= ~PS_AG;
    }

    pstate_regs = env->pstate & 0xc01;
    new_pstate_regs = new_pstate & 0xc01;

    if (new_pstate_regs != pstate_regs) {
        DPRINTF_PSTATE("change_pstate: switching regs old=%x new=%x\n",
                       pstate_regs, new_pstate_regs);
        // Switch global register bank
        src = get_gregset(new_pstate_regs);
        dst = get_gregset(pstate_regs);
        memcpy32(dst, env->gregs);
        memcpy32(env->gregs, src);
    }
    else {
        DPRINTF_PSTATE("change_pstate: regs new=%x (unchanged)\n",
                       new_pstate_regs);
    }
    env->pstate = new_pstate;
}
/* WRPSTATE: write PSTATE (masked to implemented bits) and re-evaluate
   pending interrupts, since IE may have been toggled. */
void helper_wrpstate(target_ulong new_state)
{
    change_pstate(new_state & 0xf3f);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
/* WRPIL: write the Processor Interrupt Level and re-evaluate pending
   interrupts, since lowering PIL can unmask one.  No-op in user mode. */
void helper_wrpil(target_ulong new_pil)
{
#if !defined(CONFIG_USER_ONLY)
    DPRINTF_PSTATE("helper_wrpil old=%x new=%x\n",
                   env->psrpil, (uint32_t)new_pil);

    env->psrpil = new_pil;

    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
/* DONE: return from trap, resuming at the instruction after the trapped
   one (pc <- TNPC).  Unpacks CCR/ASI/PSTATE/CWP from TSTATE, pops the
   trap level, and re-checks interrupts now unmasked. */
void helper_done(void)
{
    trap_state* tsptr = cpu_tsptr(env);

    env->pc = tsptr->tnpc;
    env->npc = tsptr->tnpc + 4;
    PUT_CCR(env, tsptr->tstate >> 32);
    env->asi = (tsptr->tstate >> 24) & 0xff;
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
    PUT_CWP64(env, tsptr->tstate & 0xff);
    env->tl--;

    DPRINTF_PSTATE("... helper_done tl=%d\n", env->tl);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
/* RETRY: return from trap, re-executing the trapped instruction
   (pc <- TPC, npc <- TNPC).  Otherwise identical to DONE. */
void helper_retry(void)
{
    trap_state* tsptr = cpu_tsptr(env);

    env->pc = tsptr->tpc;
    env->npc = tsptr->tnpc;
    PUT_CCR(env, tsptr->tstate >> 32);
    env->asi = (tsptr->tstate >> 24) & 0xff;
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
    PUT_CWP64(env, tsptr->tstate & 0xff);
    env->tl--;

    DPRINTF_PSTATE("... helper_retry tl=%d\n", env->tl);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
/* Common tail for the SOFTINT write helpers: update the register only
   when it actually changes and re-check for newly-pending interrupts.
   `operation` is only used for debug logging. */
static void do_modify_softint(const char* operation, uint32_t value)
{
    if (env->softint != value) {
        env->softint = value;
        DPRINTF_PSTATE(": %s new %08x\n", operation, env->softint);
#if !defined(CONFIG_USER_ONLY)
        if (cpu_interrupts_enabled(env)) {
            cpu_check_irqs(env);
        }
#endif
    }
}
/* WR %softint_set: OR bits into SOFTINT. */
void helper_set_softint(uint64_t value)
{
    do_modify_softint("helper_set_softint", env->softint | (uint32_t)value);
}
/* WR %softint_clear: clear bits in SOFTINT. */
void helper_clear_softint(uint64_t value)
{
    do_modify_softint("helper_clear_softint", env->softint & (uint32_t)~value);
}
/* WR %softint: replace SOFTINT wholesale. */
void helper_write_softint(uint64_t value)
{
    do_modify_softint("helper_write_softint", (uint32_t)value);
}
3541 #endif
/* FLUSH: invalidate any translated code covering the 8-byte-aligned
   doubleword at addr (self-modifying-code support). */
void helper_flush(target_ulong addr)
{
    addr &= ~7;
    tb_invalidate_page_range(addr, addr + 8);
}
3549 #ifdef TARGET_SPARC64
3550 #ifdef DEBUG_PCALL
/* Human-readable names for V9 trap types 0x00-0x7f, used only by the
   CPU_LOG_INT debug logging in do_interrupt; unlisted entries are NULL. */
static const char * const excp_names[0x80] = {
    [TT_TFAULT] = "Instruction Access Fault",
    [TT_TMISS] = "Instruction Access MMU Miss",
    [TT_CODE_ACCESS] = "Instruction Access Error",
    [TT_ILL_INSN] = "Illegal Instruction",
    [TT_PRIV_INSN] = "Privileged Instruction",
    [TT_NFPU_INSN] = "FPU Disabled",
    [TT_FP_EXCP] = "FPU Exception",
    [TT_TOVF] = "Tag Overflow",
    [TT_CLRWIN] = "Clean Windows",
    [TT_DIV_ZERO] = "Division By Zero",
    [TT_DFAULT] = "Data Access Fault",
    [TT_DMISS] = "Data Access MMU Miss",
    [TT_DATA_ACCESS] = "Data Access Error",
    [TT_DPROT] = "Data Protection Error",
    [TT_UNALIGNED] = "Unaligned Memory Access",
    [TT_PRIV_ACT] = "Privileged Action",
    [TT_EXTINT | 0x1] = "External Interrupt 1",
    [TT_EXTINT | 0x2] = "External Interrupt 2",
    [TT_EXTINT | 0x3] = "External Interrupt 3",
    [TT_EXTINT | 0x4] = "External Interrupt 4",
    [TT_EXTINT | 0x5] = "External Interrupt 5",
    [TT_EXTINT | 0x6] = "External Interrupt 6",
    [TT_EXTINT | 0x7] = "External Interrupt 7",
    [TT_EXTINT | 0x8] = "External Interrupt 8",
    [TT_EXTINT | 0x9] = "External Interrupt 9",
    [TT_EXTINT | 0xa] = "External Interrupt 10",
    [TT_EXTINT | 0xb] = "External Interrupt 11",
    [TT_EXTINT | 0xc] = "External Interrupt 12",
    [TT_EXTINT | 0xd] = "External Interrupt 13",
    [TT_EXTINT | 0xe] = "External Interrupt 14",
    [TT_EXTINT | 0xf] = "External Interrupt 15",
};
3584 #endif
/* Return the trap-stack entry for the current trap level (TL masked to
   the implemented range). */
trap_state* cpu_tsptr(CPUState* env)
{
    return &env->ts[env->tl & MAXTL_MASK];
}
/* V9 trap entry: push CPU state onto the trap stack, switch to the
   global-register bank the trap class requires, adjust CWP for window
   traps, and vector through TBR.  Exceeding MAXTL aborts (error state). */
void do_interrupt(CPUState *env)
{
    int intno = env->exception_index;
    trap_state* tsptr;

#ifdef DEBUG_PCALL
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x180)
            name = "Unknown";
        else if (intno >= 0x100)
            name = "Trap Instruction";
        else if (intno >= 0xc0)
            name = "Window Fill";
        else if (intno >= 0x80)
            name = "Window Spill";
        else {
            name = excp_names[intno];
            if (!name)
                name = "Unknown";
        }

        qemu_log("%6d: %s (v=%04x) pc=%016" PRIx64 " npc=%016" PRIx64
                " SP=%016" PRIx64 "\n",
                count, name, intno,
                env->pc,
                env->npc, env->regwptr[6]);
        log_cpu_state(env, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            qemu_log("       code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));
            }
            qemu_log("\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    if (env->tl >= env->maxtl) {
        cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
                  " Error state", env->exception_index, env->tl, env->maxtl);
        return;
    }
#endif
    if (env->tl < env->maxtl - 1) {
        env->tl++;
    } else {
        /* last usable level: enter RED state */
        env->pstate |= PS_RED;
        if (env->tl < env->maxtl)
            env->tl++;
    }
    tsptr = cpu_tsptr(env);

    /* TSTATE packs CCR | ASI | PSTATE | CWP */
    tsptr->tstate = ((uint64_t)GET_CCR(env) << 32) |
        ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
        GET_CWP64(env);
    tsptr->tpc = env->pc;
    tsptr->tnpc = env->npc;
    tsptr->tt = intno;

    /* pick the global-register bank mandated for this trap class */
    switch (intno) {
    case TT_IVEC:
        change_pstate(PS_PEF | PS_PRIV | PS_IG);
        break;
    case TT_TFAULT:
    case TT_DFAULT:
    case TT_TMISS ... TT_TMISS + 3:
    case TT_DMISS ... TT_DMISS + 3:
    case TT_DPROT ... TT_DPROT + 3:
        change_pstate(PS_PEF | PS_PRIV | PS_MG);
        break;
    default:
        change_pstate(PS_PEF | PS_PRIV | PS_AG);
        break;
    }

    if (intno == TT_CLRWIN)
        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1));
    else if ((intno & 0x1c0) == TT_SPILL)
        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2));
    else if ((intno & 0x1c0) == TT_FILL)
        cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1));
    env->tbr &= ~0x7fffULL;
    env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = -1;
}
3688 #else
3689 #ifdef DEBUG_PCALL
3690 static const char * const excp_names[0x80] = {
3691 [TT_TFAULT] = "Instruction Access Fault",
3692 [TT_ILL_INSN] = "Illegal Instruction",
3693 [TT_PRIV_INSN] = "Privileged Instruction",
3694 [TT_NFPU_INSN] = "FPU Disabled",
3695 [TT_WIN_OVF] = "Window Overflow",
3696 [TT_WIN_UNF] = "Window Underflow",
3697 [TT_UNALIGNED] = "Unaligned Memory Access",
3698 [TT_FP_EXCP] = "FPU Exception",
3699 [TT_DFAULT] = "Data Access Fault",
3700 [TT_TOVF] = "Tag Overflow",
3701 [TT_EXTINT | 0x1] = "External Interrupt 1",
3702 [TT_EXTINT | 0x2] = "External Interrupt 2",
3703 [TT_EXTINT | 0x3] = "External Interrupt 3",
3704 [TT_EXTINT | 0x4] = "External Interrupt 4",
3705 [TT_EXTINT | 0x5] = "External Interrupt 5",
3706 [TT_EXTINT | 0x6] = "External Interrupt 6",
3707 [TT_EXTINT | 0x7] = "External Interrupt 7",
3708 [TT_EXTINT | 0x8] = "External Interrupt 8",
3709 [TT_EXTINT | 0x9] = "External Interrupt 9",
3710 [TT_EXTINT | 0xa] = "External Interrupt 10",
3711 [TT_EXTINT | 0xb] = "External Interrupt 11",
3712 [TT_EXTINT | 0xc] = "External Interrupt 12",
3713 [TT_EXTINT | 0xd] = "External Interrupt 13",
3714 [TT_EXTINT | 0xe] = "External Interrupt 14",
3715 [TT_EXTINT | 0xf] = "External Interrupt 15",
3716 [TT_TOVF] = "Tag Overflow",
3717 [TT_CODE_ACCESS] = "Instruction Access Error",
3718 [TT_DATA_ACCESS] = "Data Access Error",
3719 [TT_DIV_ZERO] = "Division By Zero",
3720 [TT_NCP_INSN] = "Coprocessor Disabled",
3722 #endif
/* V8 trap entry: disable further traps (ET=0), rotate into a fresh
   window, save pc/npc into %l1/%l2, enter supervisor mode, and vector
   through TBR.  A trap while ET=0 aborts (error state). */
void do_interrupt(CPUState *env)
{
    int cwp, intno = env->exception_index;

#ifdef DEBUG_PCALL
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x100)
            name = "Unknown";
        else if (intno >= 0x80)
            name = "Trap Instruction";
        else {
            name = excp_names[intno];
            if (!name)
                name = "Unknown";
        }

        qemu_log("%6d: %s (v=%02x) pc=%08x npc=%08x SP=%08x\n",
                count, name, intno,
                env->pc,
                env->npc, env->regwptr[6]);
        log_cpu_state(env, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            qemu_log("       code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));
            }
            qemu_log("\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    if (env->psret == 0) {
        cpu_abort(env, "Trap 0x%02x while interrupts disabled, Error state",
                  env->exception_index);
        return;
    }
#endif
    env->psret = 0;
    cwp = cpu_cwp_dec(env, env->cwp - 1);
    cpu_set_cwp(env, cwp);
    /* %l1/%l2 of the new window receive the trapped pc/npc */
    env->regwptr[9] = env->pc;
    env->regwptr[10] = env->npc;
    env->psrps = env->psrs;
    env->psrs = 1;
    env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = -1;
}
3783 #endif
3785 #if !defined(CONFIG_USER_ONLY)
3787 static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
3788 void *retaddr);
3790 #define MMUSUFFIX _mmu
3791 #define ALIGNED_ONLY
3793 #define SHIFT 0
3794 #include "softmmu_template.h"
3796 #define SHIFT 1
3797 #include "softmmu_template.h"
3799 #define SHIFT 2
3800 #include "softmmu_template.h"
3802 #define SHIFT 3
3803 #include "softmmu_template.h"
3805 /* XXX: make it generic ? */
/* XXX: make it generic ? */
/* If retaddr points into translated code, restore the guest CPU state
   (pc, conditions) to the point of the faulting instruction; a NULL
   retaddr means the fault came from C code and nothing needs fixing. */
static void cpu_restore_state2(void *retaddr)
{
    TranslationBlock *tb;
    unsigned long pc;

    if (retaddr) {
        /* now we have a real cpu fault */
        pc = (unsigned long)retaddr;
        tb = tb_find_pc(pc);
        if (tb) {
            /* the PC is inside the translated code. It means that we have
               a virtual CPU fault */
            cpu_restore_state(tb, env, pc, (void *)(long)env->cond);
        }
    }
}
/* softmmu hook: misaligned guest access.  Resync CPU state from the
   translated-code return address, then raise the unaligned trap. */
static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr)
{
#ifdef DEBUG_UNALIGNED
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
           "\n", addr, env->pc);
#endif
    cpu_restore_state2(retaddr);
    raise_exception(TT_UNALIGNED);
}
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    int ret;
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        /* MMU fault: state already pushed by the fault handler; unwind
           to the faulting guest insn and longjmp out of the TB. */
        cpu_restore_state2(retaddr);
        cpu_loop_exit();
    }
    env = saved_env;
}
3856 #endif /* !CONFIG_USER_ONLY */
3858 #ifndef TARGET_SPARC64
3859 #if !defined(CONFIG_USER_ONLY)
/* sparc32 softmmu: access to an unassigned physical address.  Records
   the fault in the SuperSPARC-style MMU fault status/address registers
   (mmuregs[3]/[4]) and raises an access error unless the MMU is in
   no-fault mode, in which case stale no-fault TLB entries are flushed. */
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
{
    CPUState *saved_env;
    int fault_type;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
#ifdef DEBUG_UNASSIGNED
    if (is_asi)
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " asi 0x%02x from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, is_asi, env->pc);
    else
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, env->pc);
#endif
    /* Don't overwrite translation and access faults */
    fault_type = (env->mmuregs[3] & 0x1c) >> 2;
    if ((fault_type > 4) || (fault_type == 0)) {
        env->mmuregs[3] = 0; /* Fault status register */
        if (is_asi)
            env->mmuregs[3] |= 1 << 16;
        if (env->psrs)
            env->mmuregs[3] |= 1 << 5;
        if (is_exec)
            env->mmuregs[3] |= 1 << 6;
        if (is_write)
            env->mmuregs[3] |= 1 << 7;
        env->mmuregs[3] |= (5 << 2) | 2;  /* fault type 5 (access), FV bit */
        /* SuperSPARC will never place instruction fault addresses in the FAR */
        if (!is_exec) {
            env->mmuregs[4] = addr; /* Fault address register */
        }
    }
    /* overflow (same type fault was not read before another fault) */
    if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) {
        env->mmuregs[3] |= 1;
    }

    if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
        if (is_exec)
            raise_exception(TT_CODE_ACCESS);
        else
            raise_exception(TT_DATA_ACCESS);
    }

    /* flush neverland mappings created during no-fault mode,
       so the sequential MMU faults report proper fault types */
    if (env->mmuregs[0] & MMU_NF) {
        tlb_flush(env, 1);
    }

    env = saved_env;
}
3920 #endif
3921 #else
#if defined(CONFIG_USER_ONLY)
/* sparc64 / user-mode: unassigned access has no fault registers to fill;
   just raise the appropriate access-error trap. */
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
                          int is_asi, int size)
#else
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
#endif
{
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
           "\n", addr, env->pc);
#endif

    if (is_exec)
        raise_exception(TT_CODE_ACCESS);
    else
        raise_exception(TT_DATA_ACCESS);

    env = saved_env;
}
3949 #endif
3952 #ifdef TARGET_SPARC64
/* WR %tick: forward to the tick timer device; no-op in user mode. */
void helper_tick_set_count(void *opaque, uint64_t count)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_count(opaque, count);
#endif
}
/* RD %tick: read the tick timer; user-mode builds always see 0. */
uint64_t helper_tick_get_count(void *opaque)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_tick_get_count(opaque);
#else
    return 0;
#endif
}
/* WR %tick_cmpr / %stick_cmpr: set the timer compare value; no-op in
   user mode. */
void helper_tick_set_limit(void *opaque, uint64_t limit)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_limit(opaque, limit);
#endif
}
3975 #endif