/*
 * target-sparc/op_helper.c  (qemu/mini2440.git)
 * Fix arguments used in cas/casx, thanks to Igor Kovalenko for spotting
 */
1 #include "exec.h"
2 #include "host-utils.h"
3 #include "helper.h"
4 #if !defined(CONFIG_USER_ONLY)
5 #include "softmmu_exec.h"
6 #endif /* !defined(CONFIG_USER_ONLY) */
8 //#define DEBUG_MMU
9 //#define DEBUG_MXCC
10 //#define DEBUG_UNALIGNED
11 //#define DEBUG_UNASSIGNED
12 //#define DEBUG_ASI
14 #ifdef DEBUG_MMU
15 #define DPRINTF_MMU(fmt, args...) \
16 do { printf("MMU: " fmt , ##args); } while (0)
17 #else
18 #define DPRINTF_MMU(fmt, args...) do {} while (0)
19 #endif
21 #ifdef DEBUG_MXCC
22 #define DPRINTF_MXCC(fmt, args...) \
23 do { printf("MXCC: " fmt , ##args); } while (0)
24 #else
25 #define DPRINTF_MXCC(fmt, args...) do {} while (0)
26 #endif
28 #ifdef DEBUG_ASI
29 #define DPRINTF_ASI(fmt, args...) \
30 do { printf("ASI: " fmt , ##args); } while (0)
31 #else
32 #define DPRINTF_ASI(fmt, args...) do {} while (0)
33 #endif
35 #ifdef TARGET_SPARC64
36 #ifndef TARGET_ABI32
37 #define AM_CHECK(env1) ((env1)->pstate & PS_AM)
38 #else
39 #define AM_CHECK(env1) (1)
40 #endif
41 #endif
43 static inline void address_mask(CPUState *env1, target_ulong *addr)
45 #ifdef TARGET_SPARC64
46 if (AM_CHECK(env1))
47 *addr &= 0xffffffffULL;
48 #endif
51 void raise_exception(int tt)
53 env->exception_index = tt;
54 cpu_loop_exit();
57 void helper_trap(target_ulong nb_trap)
59 env->exception_index = TT_TRAP + (nb_trap & 0x7f);
60 cpu_loop_exit();
63 void helper_trapcc(target_ulong nb_trap, target_ulong do_trap)
65 if (do_trap) {
66 env->exception_index = TT_TRAP + (nb_trap & 0x7f);
67 cpu_loop_exit();
71 static inline void set_cwp(int new_cwp)
73 cpu_set_cwp(env, new_cwp);
76 void helper_check_align(target_ulong addr, uint32_t align)
78 if (addr & align) {
79 #ifdef DEBUG_UNALIGNED
80 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
81 "\n", addr, env->pc);
82 #endif
83 raise_exception(TT_UNALIGNED);
87 #define F_HELPER(name, p) void helper_f##name##p(void)
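/* The 64-bit and 128-bit FP helpers below do not take their wide operands as
   arguments: they read them from the CPUState temporaries DT0/DT1 (float64)
   and QT0/QT1 (float128) and leave the result in DT0/QT0. */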
89 #define F_BINOP(name) \
90 float32 helper_f ## name ## s (float32 src1, float32 src2) \
91 { \
92 return float32_ ## name (src1, src2, &env->fp_status); \
93 } \
94 F_HELPER(name, d) \
95 { \
96 DT0 = float64_ ## name (DT0, DT1, &env->fp_status); \
97 } \
98 F_HELPER(name, q) \
99 { \
100 QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \
103 F_BINOP(add);
104 F_BINOP(sub);
105 F_BINOP(mul);
106 F_BINOP(div);
107 #undef F_BINOP
109 void helper_fsmuld(float32 src1, float32 src2)
111 DT0 = float64_mul(float32_to_float64(src1, &env->fp_status),
112 float32_to_float64(src2, &env->fp_status),
113 &env->fp_status);
116 void helper_fdmulq(void)
118 QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
119 float64_to_float128(DT1, &env->fp_status),
120 &env->fp_status);
123 float32 helper_fnegs(float32 src)
125 return float32_chs(src);
128 #ifdef TARGET_SPARC64
129 F_HELPER(neg, d)
131 DT0 = float64_chs(DT1);
134 F_HELPER(neg, q)
136 QT0 = float128_chs(QT1);
138 #endif
140 /* Integer to float conversion. */
141 float32 helper_fitos(int32_t src)
143 return int32_to_float32(src, &env->fp_status);
146 void helper_fitod(int32_t src)
148 DT0 = int32_to_float64(src, &env->fp_status);
151 void helper_fitoq(int32_t src)
153 QT0 = int32_to_float128(src, &env->fp_status);
156 #ifdef TARGET_SPARC64
157 float32 helper_fxtos(void)
159 return int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
162 F_HELPER(xto, d)
164 DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
167 F_HELPER(xto, q)
169 QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
171 #endif
172 #undef F_HELPER
174 /* floating point conversion */
175 float32 helper_fdtos(void)
177 return float64_to_float32(DT1, &env->fp_status);
180 void helper_fstod(float32 src)
182 DT0 = float32_to_float64(src, &env->fp_status);
185 float32 helper_fqtos(void)
187 return float128_to_float32(QT1, &env->fp_status);
190 void helper_fstoq(float32 src)
192 QT0 = float32_to_float128(src, &env->fp_status);
195 void helper_fqtod(void)
197 DT0 = float128_to_float64(QT1, &env->fp_status);
200 void helper_fdtoq(void)
202 QT0 = float64_to_float128(DT1, &env->fp_status);
205 /* Float to integer conversion. */
206 int32_t helper_fstoi(float32 src)
208 return float32_to_int32_round_to_zero(src, &env->fp_status);
211 int32_t helper_fdtoi(void)
213 return float64_to_int32_round_to_zero(DT1, &env->fp_status);
216 int32_t helper_fqtoi(void)
218 return float128_to_int32_round_to_zero(QT1, &env->fp_status);
221 #ifdef TARGET_SPARC64
222 void helper_fstox(float32 src)
224 *((int64_t *)&DT0) = float32_to_int64_round_to_zero(src, &env->fp_status);
227 void helper_fdtox(void)
229 *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
232 void helper_fqtox(void)
234 *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
237 void helper_faligndata(void)
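/* FALIGNDATA concatenates DT0:DT1 and extracts eight bytes starting at the
   byte offset held in GSR.align (set earlier by alignaddr). */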
239 uint64_t tmp;
241 tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
242 /* on many architectures a shift of 64 does nothing */
243 if ((env->gsr & 7) != 0) {
244 tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
246 *((uint64_t *)&DT0) = tmp;
249 #ifdef WORDS_BIGENDIAN
250 #define VIS_B64(n) b[7 - (n)]
251 #define VIS_W64(n) w[3 - (n)]
252 #define VIS_SW64(n) sw[3 - (n)]
253 #define VIS_L64(n) l[1 - (n)]
254 #define VIS_B32(n) b[3 - (n)]
255 #define VIS_W32(n) w[1 - (n)]
256 #else
257 #define VIS_B64(n) b[n]
258 #define VIS_W64(n) w[n]
259 #define VIS_SW64(n) sw[n]
260 #define VIS_L64(n) l[n]
261 #define VIS_B32(n) b[n]
262 #define VIS_W32(n) w[n]
263 #endif
265 typedef union {
266 uint8_t b[8];
267 uint16_t w[4];
268 int16_t sw[4];
269 uint32_t l[2];
270 float64 d;
271 } vis64;
273 typedef union {
274 uint8_t b[4];
275 uint16_t w[2];
276 uint32_t l;
277 float32 f;
278 } vis32;
280 void helper_fpmerge(void)
282 vis64 s, d;
284 s.d = DT0;
285 d.d = DT1;
287 // Reverse calculation order to handle overlap
288 d.VIS_B64(7) = s.VIS_B64(3);
289 d.VIS_B64(6) = d.VIS_B64(3);
290 d.VIS_B64(5) = s.VIS_B64(2);
291 d.VIS_B64(4) = d.VIS_B64(2);
292 d.VIS_B64(3) = s.VIS_B64(1);
293 d.VIS_B64(2) = d.VIS_B64(1);
294 d.VIS_B64(1) = s.VIS_B64(0);
295 //d.VIS_B64(0) = d.VIS_B64(0);
297 DT0 = d.d;
300 void helper_fmul8x16(void)
302 vis64 s, d;
303 uint32_t tmp;
305 s.d = DT0;
306 d.d = DT1;
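/* Multiply each signed 16-bit element of one operand by the matching
   unsigned 8-bit element of the other; the 24-bit product is rounded to
   its upper 16 bits (round up when the discarded low byte is > 0x7f). */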
308 #define PMUL(r) \
309 tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \
310 if ((tmp & 0xff) > 0x7f) \
311 tmp += 0x100; \
312 d.VIS_W64(r) = tmp >> 8;
314 PMUL(0);
315 PMUL(1);
316 PMUL(2);
317 PMUL(3);
318 #undef PMUL
320 DT0 = d.d;
323 void helper_fmul8x16al(void)
325 vis64 s, d;
326 uint32_t tmp;
328 s.d = DT0;
329 d.d = DT1;
331 #define PMUL(r) \
332 tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \
333 if ((tmp & 0xff) > 0x7f) \
334 tmp += 0x100; \
335 d.VIS_W64(r) = tmp >> 8;
337 PMUL(0);
338 PMUL(1);
339 PMUL(2);
340 PMUL(3);
341 #undef PMUL
343 DT0 = d.d;
346 void helper_fmul8x16au(void)
348 vis64 s, d;
349 uint32_t tmp;
351 s.d = DT0;
352 d.d = DT1;
354 #define PMUL(r) \
355 tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \
356 if ((tmp & 0xff) > 0x7f) \
357 tmp += 0x100; \
358 d.VIS_W64(r) = tmp >> 8;
360 PMUL(0);
361 PMUL(1);
362 PMUL(2);
363 PMUL(3);
364 #undef PMUL
366 DT0 = d.d;
369 void helper_fmul8sux16(void)
371 vis64 s, d;
372 uint32_t tmp;
374 s.d = DT0;
375 d.d = DT1;
377 #define PMUL(r) \
378 tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
379 if ((tmp & 0xff) > 0x7f) \
380 tmp += 0x100; \
381 d.VIS_W64(r) = tmp >> 8;
383 PMUL(0);
384 PMUL(1);
385 PMUL(2);
386 PMUL(3);
387 #undef PMUL
389 DT0 = d.d;
392 void helper_fmul8ulx16(void)
394 vis64 s, d;
395 uint32_t tmp;
397 s.d = DT0;
398 d.d = DT1;
400 #define PMUL(r) \
401 tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
402 if ((tmp & 0xff) > 0x7f) \
403 tmp += 0x100; \
404 d.VIS_W64(r) = tmp >> 8;
406 PMUL(0);
407 PMUL(1);
408 PMUL(2);
409 PMUL(3);
410 #undef PMUL
412 DT0 = d.d;
415 void helper_fmuld8sux16(void)
417 vis64 s, d;
418 uint32_t tmp;
420 s.d = DT0;
421 d.d = DT1;
423 #define PMUL(r) \
424 tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
425 if ((tmp & 0xff) > 0x7f) \
426 tmp += 0x100; \
427 d.VIS_L64(r) = tmp;
429 // Reverse calculation order to handle overlap
430 PMUL(1);
431 PMUL(0);
432 #undef PMUL
434 DT0 = d.d;
437 void helper_fmuld8ulx16(void)
439 vis64 s, d;
440 uint32_t tmp;
442 s.d = DT0;
443 d.d = DT1;
445 #define PMUL(r) \
446 tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
447 if ((tmp & 0xff) > 0x7f) \
448 tmp += 0x100; \
449 d.VIS_L64(r) = tmp;
451 // Reverse calculation order to handle overlap
452 PMUL(1);
453 PMUL(0);
454 #undef PMUL
456 DT0 = d.d;
459 void helper_fexpand(void)
461 vis32 s;
462 vis64 d;
464 s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
465 d.d = DT1;
466 d.VIS_W64(0) = s.VIS_B32(0) << 4;
467 d.VIS_W64(1) = s.VIS_B32(1) << 4;
468 d.VIS_W64(2) = s.VIS_B32(2) << 4;
469 d.VIS_W64(3) = s.VIS_B32(3) << 4;
471 DT0 = d.d;
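/* Partitioned add/subtract: VIS_HELPER expands to the 16-bit and 32-bit
   element variants, both on 64-bit operands (via DT0/DT1) and on 32-bit
   operands passed directly as arguments. */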
474 #define VIS_HELPER(name, F) \
475 void name##16(void) \
477 vis64 s, d; \
479 s.d = DT0; \
480 d.d = DT1; \
482 d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
483 d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
484 d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
485 d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
487 DT0 = d.d; \
490 uint32_t name##16s(uint32_t src1, uint32_t src2) \
492 vis32 s, d; \
494 s.l = src1; \
495 d.l = src2; \
497 d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
498 d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
500 return d.l; \
503 void name##32(void) \
505 vis64 s, d; \
507 s.d = DT0; \
508 d.d = DT1; \
510 d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
511 d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
513 DT0 = d.d; \
516 uint32_t name##32s(uint32_t src1, uint32_t src2) \
518 vis32 s, d; \
520 s.l = src1; \
521 d.l = src2; \
523 d.l = F(d.l, s.l); \
525 return d.l; \
528 #define FADD(a, b) ((a) + (b))
529 #define FSUB(a, b) ((a) - (b))
530 VIS_HELPER(helper_fpadd, FADD)
531 VIS_HELPER(helper_fpsub, FSUB)
533 #define VIS_CMPHELPER(name, F) \
534 void name##16(void) \
536 vis64 s, d; \
538 s.d = DT0; \
539 d.d = DT1; \
541 d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0; \
542 d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0; \
543 d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0; \
544 d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0; \
546 DT0 = d.d; \
549 void name##32(void) \
551 vis64 s, d; \
553 s.d = DT0; \
554 d.d = DT1; \
556 d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0; \
557 d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0; \
559 DT0 = d.d; \
562 #define FCMPGT(a, b) ((a) > (b))
563 #define FCMPEQ(a, b) ((a) == (b))
564 #define FCMPLE(a, b) ((a) <= (b))
565 #define FCMPNE(a, b) ((a) != (b))
567 VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
568 VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
569 VIS_CMPHELPER(helper_fcmple, FCMPLE)
570 VIS_CMPHELPER(helper_fcmpne, FCMPNE)
571 #endif
573 void helper_check_ieee_exceptions(void)
575 target_ulong status;
577 status = get_float_exception_flags(&env->fp_status);
578 if (status) {
579 /* Copy IEEE 754 flags into FSR */
580 if (status & float_flag_invalid)
581 env->fsr |= FSR_NVC;
582 if (status & float_flag_overflow)
583 env->fsr |= FSR_OFC;
584 if (status & float_flag_underflow)
585 env->fsr |= FSR_UFC;
586 if (status & float_flag_divbyzero)
587 env->fsr |= FSR_DZC;
588 if (status & float_flag_inexact)
589 env->fsr |= FSR_NXC;
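/* TEM (FSR bits 27:23) lines up with CEXC (bits 4:0) after the shift by 23:
   an unmasked current exception traps, otherwise the CEXC bits are
   accumulated into AEXC (CEXC << 5). */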
591 if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
592 /* Unmasked exception, generate a trap */
593 env->fsr |= FSR_FTT_IEEE_EXCP;
594 raise_exception(TT_FP_EXCP);
595 } else {
596 /* Accumulate exceptions */
597 env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
602 void helper_clear_float_exceptions(void)
604 set_float_exception_flags(0, &env->fp_status);
607 float32 helper_fabss(float32 src)
609 return float32_abs(src);
612 #ifdef TARGET_SPARC64
613 void helper_fabsd(void)
615 DT0 = float64_abs(DT1);
618 void helper_fabsq(void)
620 QT0 = float128_abs(QT1);
622 #endif
624 float32 helper_fsqrts(float32 src)
626 return float32_sqrt(src, &env->fp_status);
629 void helper_fsqrtd(void)
631 DT0 = float64_sqrt(DT1, &env->fp_status);
634 void helper_fsqrtq(void)
636 QT0 = float128_sqrt(QT1, &env->fp_status);
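/* Generate the fcmp helpers: the comparison result is written to the FCCn
   field at bit offset FS of the FSR; with TRAP set (the fcmpe* variants) an
   unordered result always raises an IEEE exception trap. */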
639 #define GEN_FCMP(name, size, reg1, reg2, FS, TRAP) \
640 void glue(helper_, name) (void) \
642 target_ulong new_fsr; \
644 env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
645 switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) { \
646 case float_relation_unordered: \
647 new_fsr = (FSR_FCC1 | FSR_FCC0) << FS; \
648 if ((env->fsr & FSR_NVM) || TRAP) { \
649 env->fsr |= new_fsr; \
650 env->fsr |= FSR_NVC; \
651 env->fsr |= FSR_FTT_IEEE_EXCP; \
652 raise_exception(TT_FP_EXCP); \
653 } else { \
654 env->fsr |= FSR_NVA; \
656 break; \
657 case float_relation_less: \
658 new_fsr = FSR_FCC0 << FS; \
659 break; \
660 case float_relation_greater: \
661 new_fsr = FSR_FCC1 << FS; \
662 break; \
663 default: \
664 new_fsr = 0; \
665 break; \
667 env->fsr |= new_fsr; \
669 #define GEN_FCMPS(name, size, FS, TRAP) \
670 void glue(helper_, name)(float32 src1, float32 src2) \
672 target_ulong new_fsr; \
674 env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
675 switch (glue(size, _compare) (src1, src2, &env->fp_status)) { \
676 case float_relation_unordered: \
677 new_fsr = (FSR_FCC1 | FSR_FCC0) << FS; \
678 if ((env->fsr & FSR_NVM) || TRAP) { \
679 env->fsr |= new_fsr; \
680 env->fsr |= FSR_NVC; \
681 env->fsr |= FSR_FTT_IEEE_EXCP; \
682 raise_exception(TT_FP_EXCP); \
683 } else { \
684 env->fsr |= FSR_NVA; \
686 break; \
687 case float_relation_less: \
688 new_fsr = FSR_FCC0 << FS; \
689 break; \
690 case float_relation_greater: \
691 new_fsr = FSR_FCC1 << FS; \
692 break; \
693 default: \
694 new_fsr = 0; \
695 break; \
697 env->fsr |= new_fsr; \
700 GEN_FCMPS(fcmps, float32, 0, 0);
701 GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);
703 GEN_FCMPS(fcmpes, float32, 0, 1);
704 GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);
706 GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
707 GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
709 #ifdef TARGET_SPARC64
710 GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
711 GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
712 GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
714 GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
715 GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
716 GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
718 GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
719 GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
720 GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
722 GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
723 GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
724 GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
726 GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
727 GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
728 GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
730 GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
731 GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
732 GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
733 #endif
734 #undef GEN_FCMPS
736 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
737 defined(DEBUG_MXCC)
738 static void dump_mxcc(CPUState *env)
740 printf("mxccdata: %016llx %016llx %016llx %016llx\n",
741 env->mxccdata[0], env->mxccdata[1],
742 env->mxccdata[2], env->mxccdata[3]);
743 printf("mxccregs: %016llx %016llx %016llx %016llx\n"
744 " %016llx %016llx %016llx %016llx\n",
745 env->mxccregs[0], env->mxccregs[1],
746 env->mxccregs[2], env->mxccregs[3],
747 env->mxccregs[4], env->mxccregs[5],
748 env->mxccregs[6], env->mxccregs[7]);
750 #endif
752 #if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
753 && defined(DEBUG_ASI)
754 static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
755 uint64_t r1)
757 switch (size)
759 case 1:
760 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
761 addr, asi, r1 & 0xff);
762 break;
763 case 2:
764 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
765 addr, asi, r1 & 0xffff);
766 break;
767 case 4:
768 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
769 addr, asi, r1 & 0xffffffff);
770 break;
771 case 8:
772 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
773 addr, asi, r1);
774 break;
777 #endif
779 #ifndef TARGET_SPARC64
780 #ifndef CONFIG_USER_ONLY
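/* SPARC32 alternate-space load: the ASI number selects the address space
   (MXCC registers, MMU probe/registers, user/supervisor data, physical
   pass-through, cache diagnostics, ...). */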
781 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
783 uint64_t ret = 0;
784 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
785 uint32_t last_addr = addr;
786 #endif
788 helper_check_align(addr, size - 1);
789 switch (asi) {
790 case 2: /* SuperSparc MXCC registers */
791 switch (addr) {
792 case 0x01c00a00: /* MXCC control register */
793 if (size == 8)
794 ret = env->mxccregs[3];
795 else
796 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
797 size);
798 break;
799 case 0x01c00a04: /* MXCC control register */
800 if (size == 4)
801 ret = env->mxccregs[3];
802 else
803 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
804 size);
805 break;
806 case 0x01c00c00: /* Module reset register */
807 if (size == 8) {
808 ret = env->mxccregs[5];
809 // should we do something here?
810 } else
811 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
812 size);
813 break;
814 case 0x01c00f00: /* MBus port address register */
815 if (size == 8)
816 ret = env->mxccregs[7];
817 else
818 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
819 size);
820 break;
821 default:
822 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
823 size);
824 break;
826 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
827 "addr = %08x -> ret = %08x,"
828 "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
829 #ifdef DEBUG_MXCC
830 dump_mxcc(env);
831 #endif
832 break;
833 case 3: /* MMU probe */
835 int mmulev;
837 mmulev = (addr >> 8) & 15;
838 if (mmulev > 4)
839 ret = 0;
840 else
841 ret = mmu_probe(env, addr, mmulev);
842 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
843 addr, mmulev, ret);
845 break;
846 case 4: /* read MMU regs */
848 int reg = (addr >> 8) & 0x1f;
850 ret = env->mmuregs[reg];
851 if (reg == 3) /* Fault status cleared on read */
852 env->mmuregs[3] = 0;
853 else if (reg == 0x13) /* Fault status read */
854 ret = env->mmuregs[3];
855 else if (reg == 0x14) /* Fault address read */
856 ret = env->mmuregs[4];
857 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
859 break;
860 case 5: // Turbosparc ITLB Diagnostic
861 case 6: // Turbosparc DTLB Diagnostic
862 case 7: // Turbosparc IOTLB Diagnostic
863 break;
864 case 9: /* Supervisor code access */
865 switch(size) {
866 case 1:
867 ret = ldub_code(addr);
868 break;
869 case 2:
870 ret = lduw_code(addr);
871 break;
872 default:
873 case 4:
874 ret = ldl_code(addr);
875 break;
876 case 8:
877 ret = ldq_code(addr);
878 break;
880 break;
881 case 0xa: /* User data access */
882 switch(size) {
883 case 1:
884 ret = ldub_user(addr);
885 break;
886 case 2:
887 ret = lduw_user(addr);
888 break;
889 default:
890 case 4:
891 ret = ldl_user(addr);
892 break;
893 case 8:
894 ret = ldq_user(addr);
895 break;
897 break;
898 case 0xb: /* Supervisor data access */
899 switch(size) {
900 case 1:
901 ret = ldub_kernel(addr);
902 break;
903 case 2:
904 ret = lduw_kernel(addr);
905 break;
906 default:
907 case 4:
908 ret = ldl_kernel(addr);
909 break;
910 case 8:
911 ret = ldq_kernel(addr);
912 break;
914 break;
915 case 0xc: /* I-cache tag */
916 case 0xd: /* I-cache data */
917 case 0xe: /* D-cache tag */
918 case 0xf: /* D-cache data */
919 break;
920 case 0x20: /* MMU passthrough */
921 switch(size) {
922 case 1:
923 ret = ldub_phys(addr);
924 break;
925 case 2:
926 ret = lduw_phys(addr);
927 break;
928 default:
929 case 4:
930 ret = ldl_phys(addr);
931 break;
932 case 8:
933 ret = ldq_phys(addr);
934 break;
936 break;
937 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
938 switch(size) {
939 case 1:
940 ret = ldub_phys((target_phys_addr_t)addr
941 | ((target_phys_addr_t)(asi & 0xf) << 32));
942 break;
943 case 2:
944 ret = lduw_phys((target_phys_addr_t)addr
945 | ((target_phys_addr_t)(asi & 0xf) << 32));
946 break;
947 default:
948 case 4:
949 ret = ldl_phys((target_phys_addr_t)addr
950 | ((target_phys_addr_t)(asi & 0xf) << 32));
951 break;
952 case 8:
953 ret = ldq_phys((target_phys_addr_t)addr
954 | ((target_phys_addr_t)(asi & 0xf) << 32));
955 break;
957 break;
958 case 0x30: // Turbosparc secondary cache diagnostic
959 case 0x31: // Turbosparc RAM snoop
960 case 0x32: // Turbosparc page table descriptor diagnostic
961 case 0x39: /* data cache diagnostic register */
962 ret = 0;
963 break;
964 case 8: /* User code access, XXX */
965 default:
966 do_unassigned_access(addr, 0, 0, asi);
967 ret = 0;
968 break;
970 if (sign) {
971 switch(size) {
972 case 1:
973 ret = (int8_t) ret;
974 break;
975 case 2:
976 ret = (int16_t) ret;
977 break;
978 case 4:
979 ret = (int32_t) ret;
980 break;
981 default:
982 break;
985 #ifdef DEBUG_ASI
986 dump_asi("read ", last_addr, asi, size, ret);
987 #endif
988 return ret;
991 void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
993 helper_check_align(addr, size - 1);
994 switch(asi) {
995 case 2: /* SuperSparc MXCC registers */
996 switch (addr) {
997 case 0x01c00000: /* MXCC stream data register 0 */
998 if (size == 8)
999 env->mxccdata[0] = val;
1000 else
1001 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1002 size);
1003 break;
1004 case 0x01c00008: /* MXCC stream data register 1 */
1005 if (size == 8)
1006 env->mxccdata[1] = val;
1007 else
1008 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1009 size);
1010 break;
1011 case 0x01c00010: /* MXCC stream data register 2 */
1012 if (size == 8)
1013 env->mxccdata[2] = val;
1014 else
1015 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1016 size);
1017 break;
1018 case 0x01c00018: /* MXCC stream data register 3 */
1019 if (size == 8)
1020 env->mxccdata[3] = val;
1021 else
1022 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1023 size);
1024 break;
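/* Writing the stream source register latches the source address and fetches
   32 bytes into mxccdata[]; writing the stream destination register below
   stores mxccdata[] back to memory. */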
1025 case 0x01c00100: /* MXCC stream source */
1026 if (size == 8)
1027 env->mxccregs[0] = val;
1028 else
1029 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1030 size);
1031 env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1032 0);
1033 env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1034 8);
1035 env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1036 16);
1037 env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1038 24);
1039 break;
1040 case 0x01c00200: /* MXCC stream destination */
1041 if (size == 8)
1042 env->mxccregs[1] = val;
1043 else
1044 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1045 size);
1046 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0,
1047 env->mxccdata[0]);
1048 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8,
1049 env->mxccdata[1]);
1050 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
1051 env->mxccdata[2]);
1052 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
1053 env->mxccdata[3]);
1054 break;
1055 case 0x01c00a00: /* MXCC control register */
1056 if (size == 8)
1057 env->mxccregs[3] = val;
1058 else
1059 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1060 size);
1061 break;
1062 case 0x01c00a04: /* MXCC control register */
1063 if (size == 4)
1064 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
1065 | val;
1066 else
1067 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1068 size);
1069 break;
1070 case 0x01c00e00: /* MXCC error register */
1071 // writing a 1 bit clears the error
1072 if (size == 8)
1073 env->mxccregs[6] &= ~val;
1074 else
1075 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1076 size);
1077 break;
1078 case 0x01c00f00: /* MBus port address register */
1079 if (size == 8)
1080 env->mxccregs[7] = val;
1081 else
1082 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1083 size);
1084 break;
1085 default:
1086 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
1087 size);
1088 break;
1090 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %08x\n", asi,
1091 size, addr, val);
1092 #ifdef DEBUG_MXCC
1093 dump_mxcc(env);
1094 #endif
1095 break;
1096 case 3: /* MMU flush */
1098 int mmulev;
1100 mmulev = (addr >> 8) & 15;
1101 DPRINTF_MMU("mmu flush level %d\n", mmulev);
1102 switch (mmulev) {
1103 case 0: // flush page
1104 tlb_flush_page(env, addr & 0xfffff000);
1105 break;
1106 case 1: // flush segment (256k)
1107 case 2: // flush region (16M)
1108 case 3: // flush context (4G)
1109 case 4: // flush entire
1110 tlb_flush(env, 1);
1111 break;
1112 default:
1113 break;
1115 #ifdef DEBUG_MMU
1116 dump_mmu(env);
1117 #endif
1119 break;
1120 case 4: /* write MMU regs */
1122 int reg = (addr >> 8) & 0x1f;
1123 uint32_t oldreg;
1125 oldreg = env->mmuregs[reg];
1126 switch(reg) {
1127 case 0: // Control Register
1128 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
1129 (val & 0x00ffffff);
1130 // Mappings generated during no-fault mode or MMU
1131 // disabled mode are invalid in normal mode
1132 if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
1133 (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
1134 tlb_flush(env, 1);
1135 break;
1136 case 1: // Context Table Pointer Register
1137 env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
1138 break;
1139 case 2: // Context Register
1140 env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
1141 if (oldreg != env->mmuregs[reg]) {
1142 /* we flush when the MMU context changes because
1143 QEMU has no MMU context support */
1144 tlb_flush(env, 1);
1146 break;
1147 case 3: // Synchronous Fault Status Register with Clear
1148 case 4: // Synchronous Fault Address Register
1149 break;
1150 case 0x10: // TLB Replacement Control Register
1151 env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
1152 break;
1153 case 0x13: // Synchronous Fault Status Register with Read and Clear
1154 env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
1155 break;
1156 case 0x14: // Synchronous Fault Address Register
1157 env->mmuregs[4] = val;
1158 break;
1159 default:
1160 env->mmuregs[reg] = val;
1161 break;
1163 if (oldreg != env->mmuregs[reg]) {
1164 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
1165 reg, oldreg, env->mmuregs[reg]);
1167 #ifdef DEBUG_MMU
1168 dump_mmu(env);
1169 #endif
1171 break;
1172 case 5: // Turbosparc ITLB Diagnostic
1173 case 6: // Turbosparc DTLB Diagnostic
1174 case 7: // Turbosparc IOTLB Diagnostic
1175 break;
1176 case 0xa: /* User data access */
1177 switch(size) {
1178 case 1:
1179 stb_user(addr, val);
1180 break;
1181 case 2:
1182 stw_user(addr, val);
1183 break;
1184 default:
1185 case 4:
1186 stl_user(addr, val);
1187 break;
1188 case 8:
1189 stq_user(addr, val);
1190 break;
1192 break;
1193 case 0xb: /* Supervisor data access */
1194 switch(size) {
1195 case 1:
1196 stb_kernel(addr, val);
1197 break;
1198 case 2:
1199 stw_kernel(addr, val);
1200 break;
1201 default:
1202 case 4:
1203 stl_kernel(addr, val);
1204 break;
1205 case 8:
1206 stq_kernel(addr, val);
1207 break;
1209 break;
1210 case 0xc: /* I-cache tag */
1211 case 0xd: /* I-cache data */
1212 case 0xe: /* D-cache tag */
1213 case 0xf: /* D-cache data */
1214 case 0x10: /* I/D-cache flush page */
1215 case 0x11: /* I/D-cache flush segment */
1216 case 0x12: /* I/D-cache flush region */
1217 case 0x13: /* I/D-cache flush context */
1218 case 0x14: /* I/D-cache flush user */
1219 break;
1220 case 0x17: /* Block copy, sta access */
1222 // val = src
1223 // addr = dst
1224 // copy 32 bytes
1225 unsigned int i;
1226 uint32_t src = val & ~3, dst = addr & ~3, temp;
1228 for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
1229 temp = ldl_kernel(src);
1230 stl_kernel(dst, temp);
1233 break;
1234 case 0x1f: /* Block fill, stda access */
1236 // addr = dst
1237 // fill 32 bytes with val
1238 unsigned int i;
1239 uint32_t dst = addr & ~7;
1241 for (i = 0; i < 32; i += 8, dst += 8)
1242 stq_kernel(dst, val);
1244 break;
1245 case 0x20: /* MMU passthrough */
1247 switch(size) {
1248 case 1:
1249 stb_phys(addr, val);
1250 break;
1251 case 2:
1252 stw_phys(addr, val);
1253 break;
1254 case 4:
1255 default:
1256 stl_phys(addr, val);
1257 break;
1258 case 8:
1259 stq_phys(addr, val);
1260 break;
1263 break;
1264 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1266 switch(size) {
1267 case 1:
1268 stb_phys((target_phys_addr_t)addr
1269 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1270 break;
1271 case 2:
1272 stw_phys((target_phys_addr_t)addr
1273 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1274 break;
1275 case 4:
1276 default:
1277 stl_phys((target_phys_addr_t)addr
1278 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1279 break;
1280 case 8:
1281 stq_phys((target_phys_addr_t)addr
1282 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1283 break;
1286 break;
1287 case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
1288 case 0x31: // store buffer data, Ross RT620 I-cache flush or
1289 // Turbosparc snoop RAM
1290 case 0x32: // store buffer control or Turbosparc page table
1291 // descriptor diagnostic
1292 case 0x36: /* I-cache flash clear */
1293 case 0x37: /* D-cache flash clear */
1294 case 0x38: /* breakpoint diagnostics */
1295 case 0x4c: /* breakpoint action */
1296 break;
1297 case 8: /* User code access, XXX */
1298 case 9: /* Supervisor code access, XXX */
1299 default:
1300 do_unassigned_access(addr, 1, 0, asi);
1301 break;
1303 #ifdef DEBUG_ASI
1304 dump_asi("write", addr, asi, size, val);
1305 #endif
1308 #endif /* CONFIG_USER_ONLY */
1309 #else /* TARGET_SPARC64 */
1311 #ifdef CONFIG_USER_ONLY
1312 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1314 uint64_t ret = 0;
1315 #if defined(DEBUG_ASI)
1316 target_ulong last_addr = addr;
1317 #endif
1319 if (asi < 0x80)
1320 raise_exception(TT_PRIV_ACT);
1322 helper_check_align(addr, size - 1);
1323 address_mask(env, &addr);
1325 switch (asi) {
1326 case 0x82: // Primary no-fault
1327 case 0x8a: // Primary no-fault LE
1328 if (page_check_range(addr, size, PAGE_READ) == -1) {
1329 #ifdef DEBUG_ASI
1330 dump_asi("read ", last_addr, asi, size, ret);
1331 #endif
1332 return 0;
1334 // Fall through
1335 case 0x80: // Primary
1336 case 0x88: // Primary LE
1338 switch(size) {
1339 case 1:
1340 ret = ldub_raw(addr);
1341 break;
1342 case 2:
1343 ret = lduw_raw(addr);
1344 break;
1345 case 4:
1346 ret = ldl_raw(addr);
1347 break;
1348 default:
1349 case 8:
1350 ret = ldq_raw(addr);
1351 break;
1354 break;
1355 case 0x83: // Secondary no-fault
1356 case 0x8b: // Secondary no-fault LE
1357 if (page_check_range(addr, size, PAGE_READ) == -1) {
1358 #ifdef DEBUG_ASI
1359 dump_asi("read ", last_addr, asi, size, ret);
1360 #endif
1361 return 0;
1363 // Fall through
1364 case 0x81: // Secondary
1365 case 0x89: // Secondary LE
1366 // XXX
1367 break;
1368 default:
1369 break;
1372 /* Convert from little endian */
1373 switch (asi) {
1374 case 0x88: // Primary LE
1375 case 0x89: // Secondary LE
1376 case 0x8a: // Primary no-fault LE
1377 case 0x8b: // Secondary no-fault LE
1378 switch(size) {
1379 case 2:
1380 ret = bswap16(ret);
1381 break;
1382 case 4:
1383 ret = bswap32(ret);
1384 break;
1385 case 8:
1386 ret = bswap64(ret);
1387 break;
1388 default:
1389 break;
1391 default:
1392 break;
1395 /* Convert to signed number */
1396 if (sign) {
1397 switch(size) {
1398 case 1:
1399 ret = (int8_t) ret;
1400 break;
1401 case 2:
1402 ret = (int16_t) ret;
1403 break;
1404 case 4:
1405 ret = (int32_t) ret;
1406 break;
1407 default:
1408 break;
1411 #ifdef DEBUG_ASI
1412 dump_asi("read ", last_addr, asi, size, ret);
1413 #endif
1414 return ret;
1417 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1419 #ifdef DEBUG_ASI
1420 dump_asi("write", addr, asi, size, val);
1421 #endif
1422 if (asi < 0x80)
1423 raise_exception(TT_PRIV_ACT);
1425 helper_check_align(addr, size - 1);
1426 address_mask(env, &addr);
1428 /* Convert to little endian */
1429 switch (asi) {
1430 case 0x88: // Primary LE
1431 case 0x89: // Secondary LE
1432 switch(size) {
1433 case 2:
1434 val = bswap16(val);
1435 break;
1436 case 4:
1437 val = bswap32(val);
1438 break;
1439 case 8:
1440 val = bswap64(val);
1441 break;
1442 default:
1443 break;
1445 default:
1446 break;
1449 switch(asi) {
1450 case 0x80: // Primary
1451 case 0x88: // Primary LE
1453 switch(size) {
1454 case 1:
1455 stb_raw(addr, val);
1456 break;
1457 case 2:
1458 stw_raw(addr, val);
1459 break;
1460 case 4:
1461 stl_raw(addr, val);
1462 break;
1463 case 8:
1464 default:
1465 stq_raw(addr, val);
1466 break;
1469 break;
1470 case 0x81: // Secondary
1471 case 0x89: // Secondary LE
1472 // XXX
1473 return;
1475 case 0x82: // Primary no-fault, RO
1476 case 0x83: // Secondary no-fault, RO
1477 case 0x8a: // Primary no-fault LE, RO
1478 case 0x8b: // Secondary no-fault LE, RO
1479 default:
1480 do_unassigned_access(addr, 1, 0, 1);
1481 return;
1485 #else /* CONFIG_USER_ONLY */
1487 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1489 uint64_t ret = 0;
1490 #if defined(DEBUG_ASI)
1491 target_ulong last_addr = addr;
1492 #endif
1494 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1495 || ((env->def->features & CPU_FEATURE_HYPV)
1496 && asi >= 0x30 && asi < 0x80
1497 && !(env->hpstate & HS_PRIV)))
1498 raise_exception(TT_PRIV_ACT);
1500 helper_check_align(addr, size - 1);
1501 switch (asi) {
1502 case 0x82: // Primary no-fault
1503 case 0x8a: // Primary no-fault LE
1504 if (cpu_get_phys_page_debug(env, addr) == -1ULL) {
1505 #ifdef DEBUG_ASI
1506 dump_asi("read ", last_addr, asi, size, ret);
1507 #endif
1508 return 0;
1510 // Fall through
1511 case 0x10: // As if user primary
1512 case 0x18: // As if user primary LE
1513 case 0x80: // Primary
1514 case 0x88: // Primary LE
1515 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1516 if ((env->def->features & CPU_FEATURE_HYPV)
1517 && env->hpstate & HS_PRIV) {
1518 switch(size) {
1519 case 1:
1520 ret = ldub_hypv(addr);
1521 break;
1522 case 2:
1523 ret = lduw_hypv(addr);
1524 break;
1525 case 4:
1526 ret = ldl_hypv(addr);
1527 break;
1528 default:
1529 case 8:
1530 ret = ldq_hypv(addr);
1531 break;
1533 } else {
1534 switch(size) {
1535 case 1:
1536 ret = ldub_kernel(addr);
1537 break;
1538 case 2:
1539 ret = lduw_kernel(addr);
1540 break;
1541 case 4:
1542 ret = ldl_kernel(addr);
1543 break;
1544 default:
1545 case 8:
1546 ret = ldq_kernel(addr);
1547 break;
1550 } else {
1551 switch(size) {
1552 case 1:
1553 ret = ldub_user(addr);
1554 break;
1555 case 2:
1556 ret = lduw_user(addr);
1557 break;
1558 case 4:
1559 ret = ldl_user(addr);
1560 break;
1561 default:
1562 case 8:
1563 ret = ldq_user(addr);
1564 break;
1567 break;
1568 case 0x14: // Bypass
1569 case 0x15: // Bypass, non-cacheable
1570 case 0x1c: // Bypass LE
1571 case 0x1d: // Bypass, non-cacheable LE
1573 switch(size) {
1574 case 1:
1575 ret = ldub_phys(addr);
1576 break;
1577 case 2:
1578 ret = lduw_phys(addr);
1579 break;
1580 case 4:
1581 ret = ldl_phys(addr);
1582 break;
1583 default:
1584 case 8:
1585 ret = ldq_phys(addr);
1586 break;
1588 break;
1590 case 0x24: // Nucleus quad LDD 128 bit atomic
1591 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
1592 // Only ldda allowed
1593 raise_exception(TT_ILL_INSN);
1594 return 0;
1595 case 0x83: // Secondary no-fault
1596 case 0x8b: // Secondary no-fault LE
1597 if (cpu_get_phys_page_debug(env, addr) == -1ULL) {
1598 #ifdef DEBUG_ASI
1599 dump_asi("read ", last_addr, asi, size, ret);
1600 #endif
1601 return 0;
1603 // Fall through
1604 case 0x04: // Nucleus
1605 case 0x0c: // Nucleus Little Endian (LE)
1606 case 0x11: // As if user secondary
1607 case 0x19: // As if user secondary LE
1608 case 0x4a: // UPA config
1609 case 0x81: // Secondary
1610 case 0x89: // Secondary LE
1611 // XXX
1612 break;
1613 case 0x45: // LSU
1614 ret = env->lsu;
1615 break;
1616 case 0x50: // I-MMU regs
1618 int reg = (addr >> 3) & 0xf;
1620 ret = env->immuregs[reg];
1621 break;
1623 case 0x51: // I-MMU 8k TSB pointer
1624 case 0x52: // I-MMU 64k TSB pointer
1625 // XXX
1626 break;
1627 case 0x55: // I-MMU data access
1629 int reg = (addr >> 3) & 0x3f;
1631 ret = env->itlb_tte[reg];
1632 break;
1634 case 0x56: // I-MMU tag read
1636 int reg = (addr >> 3) & 0x3f;
1638 ret = env->itlb_tag[reg];
1639 break;
1641 case 0x58: // D-MMU regs
1643 int reg = (addr >> 3) & 0xf;
1645 ret = env->dmmuregs[reg];
1646 break;
1648 case 0x5d: // D-MMU data access
1650 int reg = (addr >> 3) & 0x3f;
1652 ret = env->dtlb_tte[reg];
1653 break;
1655 case 0x5e: // D-MMU tag read
1657 int reg = (addr >> 3) & 0x3f;
1659 ret = env->dtlb_tag[reg];
1660 break;
1662 case 0x46: // D-cache data
1663 case 0x47: // D-cache tag access
1664 case 0x4b: // E-cache error enable
1665 case 0x4c: // E-cache asynchronous fault status
1666 case 0x4d: // E-cache asynchronous fault address
1667 case 0x4e: // E-cache tag data
1668 case 0x66: // I-cache instruction access
1669 case 0x67: // I-cache tag access
1670 case 0x6e: // I-cache predecode
1671 case 0x6f: // I-cache LRU etc.
1672 case 0x76: // E-cache tag
1673 case 0x7e: // E-cache tag
1674 break;
1675 case 0x59: // D-MMU 8k TSB pointer
1676 case 0x5a: // D-MMU 64k TSB pointer
1677 case 0x5b: // D-MMU data pointer
1678 case 0x48: // Interrupt dispatch, RO
1679 case 0x49: // Interrupt data receive
1680 case 0x7f: // Incoming interrupt vector, RO
1681 // XXX
1682 break;
1683 case 0x54: // I-MMU data in, WO
1684 case 0x57: // I-MMU demap, WO
1685 case 0x5c: // D-MMU data in, WO
1686 case 0x5f: // D-MMU demap, WO
1687 case 0x77: // Interrupt vector, WO
1688 default:
1689 do_unassigned_access(addr, 0, 0, 1);
1690 ret = 0;
1691 break;
1694 /* Convert from little endian */
1695 switch (asi) {
1696 case 0x0c: // Nucleus Little Endian (LE)
1697 case 0x18: // As if user primary LE
1698 case 0x19: // As if user secondary LE
1699 case 0x1c: // Bypass LE
1700 case 0x1d: // Bypass, non-cacheable LE
1701 case 0x88: // Primary LE
1702 case 0x89: // Secondary LE
1703 case 0x8a: // Primary no-fault LE
1704 case 0x8b: // Secondary no-fault LE
1705 switch(size) {
1706 case 2:
1707 ret = bswap16(ret);
1708 break;
1709 case 4:
1710 ret = bswap32(ret);
1711 break;
1712 case 8:
1713 ret = bswap64(ret);
1714 break;
1715 default:
1716 break;
1718 default:
1719 break;
1722 /* Convert to signed number */
1723 if (sign) {
1724 switch(size) {
1725 case 1:
1726 ret = (int8_t) ret;
1727 break;
1728 case 2:
1729 ret = (int16_t) ret;
1730 break;
1731 case 4:
1732 ret = (int32_t) ret;
1733 break;
1734 default:
1735 break;
1738 #ifdef DEBUG_ASI
1739 dump_asi("read ", last_addr, asi, size, ret);
1740 #endif
1741 return ret;
1744 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1746 #ifdef DEBUG_ASI
1747 dump_asi("write", addr, asi, size, val);
1748 #endif
1749 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1750 || ((env->def->features & CPU_FEATURE_HYPV)
1751 && asi >= 0x30 && asi < 0x80
1752 && !(env->hpstate & HS_PRIV)))
1753 raise_exception(TT_PRIV_ACT);
1755 helper_check_align(addr, size - 1);
1756 /* Convert to little endian */
1757 switch (asi) {
1758 case 0x0c: // Nucleus Little Endian (LE)
1759 case 0x18: // As if user primary LE
1760 case 0x19: // As if user secondary LE
1761 case 0x1c: // Bypass LE
1762 case 0x1d: // Bypass, non-cacheable LE
1763 case 0x88: // Primary LE
1764 case 0x89: // Secondary LE
1765 switch(size) {
1766 case 2:
1767 val = bswap16(val);
1768 break;
1769 case 4:
1770 val = bswap32(val);
1771 break;
1772 case 8:
1773 val = bswap64(val);
1774 break;
1775 default:
1776 break;
1778 default:
1779 break;
1782 switch(asi) {
1783 case 0x10: // As if user primary
1784 case 0x18: // As if user primary LE
1785 case 0x80: // Primary
1786 case 0x88: // Primary LE
1787 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1788 if ((env->def->features & CPU_FEATURE_HYPV)
1789 && env->hpstate & HS_PRIV) {
1790 switch(size) {
1791 case 1:
1792 stb_hypv(addr, val);
1793 break;
1794 case 2:
1795 stw_hypv(addr, val);
1796 break;
1797 case 4:
1798 stl_hypv(addr, val);
1799 break;
1800 case 8:
1801 default:
1802 stq_hypv(addr, val);
1803 break;
1805 } else {
1806 switch(size) {
1807 case 1:
1808 stb_kernel(addr, val);
1809 break;
1810 case 2:
1811 stw_kernel(addr, val);
1812 break;
1813 case 4:
1814 stl_kernel(addr, val);
1815 break;
1816 case 8:
1817 default:
1818 stq_kernel(addr, val);
1819 break;
1822 } else {
1823 switch(size) {
1824 case 1:
1825 stb_user(addr, val);
1826 break;
1827 case 2:
1828 stw_user(addr, val);
1829 break;
1830 case 4:
1831 stl_user(addr, val);
1832 break;
1833 case 8:
1834 default:
1835 stq_user(addr, val);
1836 break;
1839 break;
1840 case 0x14: // Bypass
1841 case 0x15: // Bypass, non-cacheable
1842 case 0x1c: // Bypass LE
1843 case 0x1d: // Bypass, non-cacheable LE
1845 switch(size) {
1846 case 1:
1847 stb_phys(addr, val);
1848 break;
1849 case 2:
1850 stw_phys(addr, val);
1851 break;
1852 case 4:
1853 stl_phys(addr, val);
1854 break;
1855 case 8:
1856 default:
1857 stq_phys(addr, val);
1858 break;
1861 return;
1862 case 0x24: // Nucleus quad LDD 128 bit atomic
1863 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
1864 // Only ldda allowed
1865 raise_exception(TT_ILL_INSN);
1866 return;
1867 case 0x04: // Nucleus
1868 case 0x0c: // Nucleus Little Endian (LE)
1869 case 0x11: // As if user secondary
1870 case 0x19: // As if user secondary LE
1871 case 0x4a: // UPA config
1872 case 0x81: // Secondary
1873 case 0x89: // Secondary LE
1874 // XXX
1875 return;
1876 case 0x45: // LSU
1878 uint64_t oldreg;
1880 oldreg = env->lsu;
1881 env->lsu = val & (DMMU_E | IMMU_E);
1882 // Mappings generated during D/I MMU disabled mode are
1883 // invalid in normal mode
1884 if (oldreg != env->lsu) {
1885 DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
1886 oldreg, env->lsu);
1887 #ifdef DEBUG_MMU
1888 dump_mmu(env);
1889 #endif
1890 tlb_flush(env, 1);
1892 return;
1894 case 0x50: // I-MMU regs
1896 int reg = (addr >> 3) & 0xf;
1897 uint64_t oldreg;
1899 oldreg = env->immuregs[reg];
1900 switch(reg) {
1901 case 0: // RO
1902 case 4:
1903 return;
1904 case 1: // Not in I-MMU
1905 case 2:
1906 case 7:
1907 case 8:
1908 return;
1909 case 3: // SFSR
1910 if ((val & 1) == 0)
1911 val = 0; // Clear SFSR
1912 break;
1913 case 5: // TSB access
1914 case 6: // Tag access
1915 default:
1916 break;
1918 env->immuregs[reg] = val;
1919 if (oldreg != env->immuregs[reg]) {
1920 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
1921 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
1923 #ifdef DEBUG_MMU
1924 dump_mmu(env);
1925 #endif
1926 return;
1928 case 0x54: // I-MMU data in
1930 unsigned int i;
1932 // Try finding an invalid entry
1933 for (i = 0; i < 64; i++) {
1934 if ((env->itlb_tte[i] & 0x8000000000000000ULL) == 0) {
1935 env->itlb_tag[i] = env->immuregs[6];
1936 env->itlb_tte[i] = val;
1937 return;
1940 // Try finding an unlocked entry
1941 for (i = 0; i < 64; i++) {
1942 if ((env->itlb_tte[i] & 0x40) == 0) {
1943 env->itlb_tag[i] = env->immuregs[6];
1944 env->itlb_tte[i] = val;
1945 return;
1948 // error state?
1949 return;
1951 case 0x55: // I-MMU data access
1953 unsigned int i = (addr >> 3) & 0x3f;
1955 env->itlb_tag[i] = env->immuregs[6];
1956 env->itlb_tte[i] = val;
1957 return;
1959 case 0x57: // I-MMU demap
1960 // XXX
1961 return;
1962 case 0x58: // D-MMU regs
1964 int reg = (addr >> 3) & 0xf;
1965 uint64_t oldreg;
1967 oldreg = env->dmmuregs[reg];
1968 switch(reg) {
1969 case 0: // RO
1970 case 4:
1971 return;
1972 case 3: // SFSR
1973 if ((val & 1) == 0) {
1974 val = 0; // Clear SFSR, Fault address
1975 env->dmmuregs[4] = 0;
1977 env->dmmuregs[reg] = val;
1978 break;
1979 case 1: // Primary context
1980 case 2: // Secondary context
1981 case 5: // TSB access
1982 case 6: // Tag access
1983 case 7: // Virtual Watchpoint
1984 case 8: // Physical Watchpoint
1985 default:
1986 break;
1988 env->dmmuregs[reg] = val;
1989 if (oldreg != env->dmmuregs[reg]) {
1990 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
1991 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
1993 #ifdef DEBUG_MMU
1994 dump_mmu(env);
1995 #endif
1996 return;
1998 case 0x5c: // D-MMU data in
2000 unsigned int i;
2002 // Try finding an invalid entry
2003 for (i = 0; i < 64; i++) {
2004 if ((env->dtlb_tte[i] & 0x8000000000000000ULL) == 0) {
2005 env->dtlb_tag[i] = env->dmmuregs[6];
2006 env->dtlb_tte[i] = val;
2007 return;
2010 // Try finding an unlocked entry
2011 for (i = 0; i < 64; i++) {
2012 if ((env->dtlb_tte[i] & 0x40) == 0) {
2013 env->dtlb_tag[i] = env->dmmuregs[6];
2014 env->dtlb_tte[i] = val;
2015 return;
2018 // error state?
2019 return;
2021 case 0x5d: // D-MMU data access
2023 unsigned int i = (addr >> 3) & 0x3f;
2025 env->dtlb_tag[i] = env->dmmuregs[6];
2026 env->dtlb_tte[i] = val;
2027 return;
2029 case 0x5f: // D-MMU demap
2030 case 0x49: // Interrupt data receive
2031 // XXX
2032 return;
2033 case 0x46: // D-cache data
2034 case 0x47: // D-cache tag access
2035 case 0x4b: // E-cache error enable
2036 case 0x4c: // E-cache asynchronous fault status
2037 case 0x4d: // E-cache asynchronous fault address
2038 case 0x4e: // E-cache tag data
2039 case 0x66: // I-cache instruction access
2040 case 0x67: // I-cache tag access
2041 case 0x6e: // I-cache predecode
2042 case 0x6f: // I-cache LRU etc.
2043 case 0x76: // E-cache tag
2044 case 0x7e: // E-cache tag
2045 return;
2046 case 0x51: // I-MMU 8k TSB pointer, RO
2047 case 0x52: // I-MMU 64k TSB pointer, RO
2048 case 0x56: // I-MMU tag read, RO
2049 case 0x59: // D-MMU 8k TSB pointer, RO
2050 case 0x5a: // D-MMU 64k TSB pointer, RO
2051 case 0x5b: // D-MMU data pointer, RO
2052 case 0x5e: // D-MMU tag read, RO
2053 case 0x48: // Interrupt dispatch, RO
2054 case 0x7f: // Incoming interrupt vector, RO
2055 case 0x82: // Primary no-fault, RO
2056 case 0x83: // Secondary no-fault, RO
2057 case 0x8a: // Primary no-fault LE, RO
2058 case 0x8b: // Secondary no-fault LE, RO
2059 default:
2060 do_unassigned_access(addr, 1, 0, 1);
2061 return;
2064 #endif /* CONFIG_USER_ONLY */
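/* LDDA: load a 64-bit pair into rd/rd+1 (rd == 0 targets %g0/%g1, and only
   %g1 is written since %g0 is hardwired to zero).  ASIs 0x24/0x2c are the
   128-bit nucleus quad loads and require 16-byte alignment. */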
2066 void helper_ldda_asi(target_ulong addr, int asi, int rd)
2068 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2069 || ((env->def->features & CPU_FEATURE_HYPV)
2070 && asi >= 0x30 && asi < 0x80
2071 && !(env->hpstate & HS_PRIV)))
2072 raise_exception(TT_PRIV_ACT);
2074 switch (asi) {
2075 case 0x24: // Nucleus quad LDD 128 bit atomic
2076 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
2077 helper_check_align(addr, 0xf);
2078 if (rd == 0) {
2079 env->gregs[1] = ldq_kernel(addr + 8);
2080 if (asi == 0x2c)
2081 bswap64s(&env->gregs[1]);
2082 } else if (rd < 8) {
2083 env->gregs[rd] = ldq_kernel(addr);
2084 env->gregs[rd + 1] = ldq_kernel(addr + 8);
2085 if (asi == 0x2c) {
2086 bswap64s(&env->gregs[rd]);
2087 bswap64s(&env->gregs[rd + 1]);
2089 } else {
2090 env->regwptr[rd] = ldq_kernel(addr);
2091 env->regwptr[rd + 1] = ldq_kernel(addr + 8);
2092 if (asi == 0x2c) {
2093 bswap64s(&env->regwptr[rd]);
2094 bswap64s(&env->regwptr[rd + 1]);
2097 break;
2098 default:
2099 helper_check_align(addr, 0x3);
2100 if (rd == 0)
2101 env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
2102 else if (rd < 8) {
2103 env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
2104 env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
2105 } else {
2106 env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
2107 env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
2109 break;
2113 void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
2115 unsigned int i;
2116 target_ulong val;
2118 helper_check_align(addr, 3);
2119 switch (asi) {
2120 case 0xf0: // Block load primary
2121 case 0xf1: // Block load secondary
2122 case 0xf8: // Block load primary LE
2123 case 0xf9: // Block load secondary LE
2124 if (rd & 7) {
2125 raise_exception(TT_ILL_INSN);
2126 return;
2128 helper_check_align(addr, 0x3f);
2129 for (i = 0; i < 16; i++) {
2130 *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
2131 0);
2132 addr += 4;
2135 return;
2136 default:
2137 break;
2140 val = helper_ld_asi(addr, asi, size, 0);
2141 switch(size) {
2142 default:
2143 case 4:
2144 *((uint32_t *)&env->fpr[rd]) = val;
2145 break;
2146 case 8:
2147 *((int64_t *)&DT0) = val;
2148 break;
2149 case 16:
2150 // XXX
2151 break;
2155 void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
2157 unsigned int i;
2158 target_ulong val = 0;
2160 helper_check_align(addr, 3);
2161 switch (asi) {
2162 case 0xf0: // Block store primary
2163 case 0xf1: // Block store secondary
2164 case 0xf8: // Block store primary LE
2165 case 0xf9: // Block store secondary LE
2166 if (rd & 7) {
2167 raise_exception(TT_ILL_INSN);
2168 return;
2170 helper_check_align(addr, 0x3f);
2171 for (i = 0; i < 16; i++) {
2172 val = *(uint32_t *)&env->fpr[rd++];
2173 helper_st_asi(addr, val, asi & 0x8f, 4);
2174 addr += 4;
2177 return;
2178 default:
2179 break;
2182 switch(size) {
2183 default:
2184 case 4:
2185 val = *((uint32_t *)&env->fpr[rd]);
2186 break;
2187 case 8:
2188 val = *((int64_t *)&DT0);
2189 break;
2190 case 16:
2191 // XXX
2192 break;
2194 helper_st_asi(addr, val, asi, size);
2197 target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
2198 target_ulong val2, uint32_t asi)
2200 target_ulong ret;
2202 val2 &= 0xffffffffUL;
2203 ret = helper_ld_asi(addr, asi, 4, 0);
2204 ret &= 0xffffffffUL;
2205 if (val2 == ret)
2206 helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4);
2207 return ret;
2210 target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
2211 target_ulong val2, uint32_t asi)
2213 target_ulong ret;
2215 ret = helper_ld_asi(addr, asi, 8, 0);
2216 if (val2 == ret)
2217 helper_st_asi(addr, val1, asi, 8);
2218 return ret;
2220 #endif /* TARGET_SPARC64 */
2222 #ifndef TARGET_SPARC64
2223 void helper_rett(void)
2225 unsigned int cwp;
2227 if (env->psret == 1)
2228 raise_exception(TT_ILL_INSN);
2230 env->psret = 1;
2231 cwp = cpu_cwp_inc(env, env->cwp + 1);
2232 if (env->wim & (1 << cwp)) {
2233 raise_exception(TT_WIN_UNF);
2235 set_cwp(cwp);
2236 env->psrs = env->psrps;
2238 #endif
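/* SPARC integer division: the 64-bit dividend is formed from Y (high 32
   bits) and rs1.  On overflow the result saturates and env->cc_src2 is set
   so that the condition-code-setting variants can report the overflow. */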
2240 target_ulong helper_udiv(target_ulong a, target_ulong b)
2242 uint64_t x0;
2243 uint32_t x1;
2245 x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
2246 x1 = b;
2248 if (x1 == 0) {
2249 raise_exception(TT_DIV_ZERO);
2252 x0 = x0 / x1;
2253 if (x0 > 0xffffffff) {
2254 env->cc_src2 = 1;
2255 return 0xffffffff;
2256 } else {
2257 env->cc_src2 = 0;
2258 return x0;
2262 target_ulong helper_sdiv(target_ulong a, target_ulong b)
2264 int64_t x0;
2265 int32_t x1;
2267 x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
2268 x1 = b;
2270 if (x1 == 0) {
2271 raise_exception(TT_DIV_ZERO);
2274 x0 = x0 / x1;
2275 if ((int32_t) x0 != x0) {
2276 env->cc_src2 = 1;
2277 return x0 < 0? 0x80000000: 0x7fffffff;
2278 } else {
2279 env->cc_src2 = 0;
2280 return x0;
2284 void helper_stdf(target_ulong addr, int mem_idx)
2286 helper_check_align(addr, 7);
2287 #if !defined(CONFIG_USER_ONLY)
2288 switch (mem_idx) {
2289 case 0:
2290 stfq_user(addr, DT0);
2291 break;
2292 case 1:
2293 stfq_kernel(addr, DT0);
2294 break;
2295 #ifdef TARGET_SPARC64
2296 case 2:
2297 stfq_hypv(addr, DT0);
2298 break;
2299 #endif
2300 default:
2301 break;
2303 #else
2304 address_mask(env, &addr);
2305 stfq_raw(addr, DT0);
2306 #endif
2309 void helper_lddf(target_ulong addr, int mem_idx)
2311 helper_check_align(addr, 7);
2312 #if !defined(CONFIG_USER_ONLY)
2313 switch (mem_idx) {
2314 case 0:
2315 DT0 = ldfq_user(addr);
2316 break;
2317 case 1:
2318 DT0 = ldfq_kernel(addr);
2319 break;
2320 #ifdef TARGET_SPARC64
2321 case 2:
2322 DT0 = ldfq_hypv(addr);
2323 break;
2324 #endif
2325 default:
2326 break;
2328 #else
2329 address_mask(env, &addr);
2330 DT0 = ldfq_raw(addr);
2331 #endif
2334 void helper_ldqf(target_ulong addr, int mem_idx)
2336 // XXX add 128 bit load
2337 CPU_QuadU u;
2339 helper_check_align(addr, 7);
2340 #if !defined(CONFIG_USER_ONLY)
2341 switch (mem_idx) {
2342 case 0:
2343 u.ll.upper = ldq_user(addr);
2344 u.ll.lower = ldq_user(addr + 8);
2345 QT0 = u.q;
2346 break;
2347 case 1:
2348 u.ll.upper = ldq_kernel(addr);
2349 u.ll.lower = ldq_kernel(addr + 8);
2350 QT0 = u.q;
2351 break;
2352 #ifdef TARGET_SPARC64
2353 case 2:
2354 u.ll.upper = ldq_hypv(addr);
2355 u.ll.lower = ldq_hypv(addr + 8);
2356 QT0 = u.q;
2357 break;
2358 #endif
2359 default:
2360 break;
2362 #else
2363 address_mask(env, &addr);
2364 u.ll.upper = ldq_raw(addr);
2365 u.ll.lower = ldq_raw((addr + 8) & 0xffffffffULL);
2366 QT0 = u.q;
2367 #endif
2370 void helper_stqf(target_ulong addr, int mem_idx)
2372 // XXX add 128 bit store
2373 CPU_QuadU u;
2375 helper_check_align(addr, 7);
2376 #if !defined(CONFIG_USER_ONLY)
2377 switch (mem_idx) {
2378 case 0:
2379 u.q = QT0;
2380 stq_user(addr, u.ll.upper);
2381 stq_user(addr + 8, u.ll.lower);
2382 break;
2383 case 1:
2384 u.q = QT0;
2385 stq_kernel(addr, u.ll.upper);
2386 stq_kernel(addr + 8, u.ll.lower);
2387 break;
2388 #ifdef TARGET_SPARC64
2389 case 2:
2390 u.q = QT0;
2391 stq_hypv(addr, u.ll.upper);
2392 stq_hypv(addr + 8, u.ll.lower);
2393 break;
2394 #endif
2395 default:
2396 break;
2398 #else
2399 u.q = QT0;
2400 address_mask(env, &addr);
2401 stq_raw(addr, u.ll.upper);
2402 stq_raw((addr + 8) & 0xffffffffULL, u.ll.lower);
2403 #endif
2406 static inline void set_fsr(void)
2408 int rnd_mode;
2410 switch (env->fsr & FSR_RD_MASK) {
2411 case FSR_RD_NEAREST:
2412 rnd_mode = float_round_nearest_even;
2413 break;
2414 default:
2415 case FSR_RD_ZERO:
2416 rnd_mode = float_round_to_zero;
2417 break;
2418 case FSR_RD_POS:
2419 rnd_mode = float_round_up;
2420 break;
2421 case FSR_RD_NEG:
2422 rnd_mode = float_round_down;
2423 break;
2425 set_float_rounding_mode(rnd_mode, &env->fp_status);
2428 void helper_ldfsr(uint32_t new_fsr)
2430 env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK);
2431 set_fsr();
2434 #ifdef TARGET_SPARC64
2435 void helper_ldxfsr(uint64_t new_fsr)
2437 env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK);
2438 set_fsr();
2440 #endif
2442 void helper_debug(void)
2444 env->exception_index = EXCP_DEBUG;
2445 cpu_loop_exit();
2448 #ifndef TARGET_SPARC64
2449 /* XXX: use another pointer for %iN registers to avoid slow wrapping
2450 handling ? */
2451 void helper_save(void)
2453 uint32_t cwp;
2455 cwp = cpu_cwp_dec(env, env->cwp - 1);
2456 if (env->wim & (1 << cwp)) {
2457 raise_exception(TT_WIN_OVF);
2459 set_cwp(cwp);
2462 void helper_restore(void)
2464 uint32_t cwp;
2466 cwp = cpu_cwp_inc(env, env->cwp + 1);
2467 if (env->wim & (1 << cwp)) {
2468 raise_exception(TT_WIN_UNF);
2470 set_cwp(cwp);
2473 void helper_wrpsr(target_ulong new_psr)
2475 if ((new_psr & PSR_CWP) >= env->nwindows)
2476 raise_exception(TT_ILL_INSN);
2477 else
2478 PUT_PSR(env, new_psr);
2481 target_ulong helper_rdpsr(void)
2483 return GET_PSR(env);
2486 #else
2487 /* XXX: use another pointer for %iN registers to avoid slow wrapping
2488 handling ? */
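/* V9 register window management: SAVE with cansave == 0 takes a spill trap
   and RESTORE with canrestore == 0 takes a fill trap; the trap type encodes
   WSTATE and whether the other window set is involved. */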
2489 void helper_save(void)
2491 uint32_t cwp;
2493 cwp = cpu_cwp_dec(env, env->cwp - 1);
2494 if (env->cansave == 0) {
2495 raise_exception(TT_SPILL | (env->otherwin != 0 ?
2496 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2497 ((env->wstate & 0x7) << 2)));
2498 } else {
2499 if (env->cleanwin - env->canrestore == 0) {
2500 // XXX Clean windows without trap
2501 raise_exception(TT_CLRWIN);
2502 } else {
2503 env->cansave--;
2504 env->canrestore++;
2505 set_cwp(cwp);
2510 void helper_restore(void)
2512 uint32_t cwp;
2514 cwp = cpu_cwp_inc(env, env->cwp + 1);
2515 if (env->canrestore == 0) {
2516 raise_exception(TT_FILL | (env->otherwin != 0 ?
2517 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2518 ((env->wstate & 0x7) << 2)));
2519 } else {
2520 env->cansave++;
2521 env->canrestore--;
2522 set_cwp(cwp);
2526 void helper_flushw(void)
2528 if (env->cansave != env->nwindows - 2) {
2529 raise_exception(TT_SPILL | (env->otherwin != 0 ?
2530 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2531 ((env->wstate & 0x7) << 2)));
2535 void helper_saved(void)
2537 env->cansave++;
2538 if (env->otherwin == 0)
2539 env->canrestore--;
2540 else
2541 env->otherwin--;
2544 void helper_restored(void)
2546 env->canrestore++;
2547 if (env->cleanwin < env->nwindows - 1)
2548 env->cleanwin++;
2549 if (env->otherwin == 0)
2550 env->cansave--;
2551 else
2552 env->otherwin--;
2555 target_ulong helper_rdccr(void)
2557 return GET_CCR(env);
2560 void helper_wrccr(target_ulong new_ccr)
2562 PUT_CCR(env, new_ccr);
2565 // CWP handling is reversed in V9, but we still use the V8 register
2566 // order.
2567 target_ulong helper_rdcwp(void)
2569 return GET_CWP64(env);
2572 void helper_wrcwp(target_ulong new_cwp)
2574 PUT_CWP64(env, new_cwp);
2577 // This macro uses non-native bit order
2578 #define GET_FIELD(X, FROM, TO) \
2579 ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))
2581 // This macro uses the order in the manuals, i.e. bit 0 is 2^0
2582 #define GET_FIELD_SP(X, FROM, TO) \
2583 GET_FIELD(X, 63 - (TO), 63 - (FROM))
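/* ARRAY8 (VIS): convert the packed 3-D fixed-point coordinates in
   pixel_addr into a blocked memory offset; cubesize controls the width of
   the middle coordinate fields. */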
2585 target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
2587 return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
2588 (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
2589 (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
2590 (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
2591 (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
2592 (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
2593 (((pixel_addr >> 55) & 1) << 4) |
2594 (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
2595 GET_FIELD_SP(pixel_addr, 11, 12);
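/* ALIGNADDRESS: record the byte offset of addr + offset in the low three
   bits of GSR (consumed by FALIGNDATA) and return the address rounded down
   to an 8-byte boundary. */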
2598 target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
2600 uint64_t tmp;
2602 tmp = addr + offset;
2603 env->gsr &= ~7ULL;
2604 env->gsr |= tmp & 7ULL;
2605 return tmp & ~7ULL;
2608 target_ulong helper_popc(target_ulong val)
2610 return ctpop64(val);
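/* The V9 alternate global register sets.  PSTATE.AG/IG/MG select which
   bank is live; change_pstate() saves the live globals to the bank for the
   old PSTATE and loads the bank selected by the new one whenever those
   bits change. */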
2613 static inline uint64_t *get_gregset(uint64_t pstate)
2615 switch (pstate) {
2616 default:
2617 case 0:
2618 return env->bgregs;
2619 case PS_AG:
2620 return env->agregs;
2621 case PS_MG:
2622 return env->mgregs;
2623 case PS_IG:
2624 return env->igregs;
2628 static inline void change_pstate(uint64_t new_pstate)
2630 uint64_t pstate_regs, new_pstate_regs;
2631 uint64_t *src, *dst;
2633 pstate_regs = env->pstate & 0xc01;
2634 new_pstate_regs = new_pstate & 0xc01;
2635 if (new_pstate_regs != pstate_regs) {
2636 // Switch global register bank
2637 src = get_gregset(new_pstate_regs);
2638 dst = get_gregset(pstate_regs);
2639 memcpy32(dst, env->gregs);
2640 memcpy32(env->gregs, src);
2642 env->pstate = new_pstate;
2645 void helper_wrpstate(target_ulong new_state)
2647 if (!(env->def->features & CPU_FEATURE_GL))
2648 change_pstate(new_state & 0xf3f);
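/* DONE and RETRY pop one trap level: CCR, ASI, PSTATE and CWP are restored
   from TSTATE, then execution resumes at TNPC (DONE) or is retried at TPC
   (RETRY). */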
2651 void helper_done(void)
2653 env->pc = env->tsptr->tnpc;
2654 env->npc = env->tsptr->tnpc + 4;
2655 PUT_CCR(env, env->tsptr->tstate >> 32);
2656 env->asi = (env->tsptr->tstate >> 24) & 0xff;
2657 change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
2658 PUT_CWP64(env, env->tsptr->tstate & 0xff);
2659 env->tl--;
2660 env->tsptr = &env->ts[env->tl & MAXTL_MASK];
2663 void helper_retry(void)
2665 env->pc = env->tsptr->tpc;
2666 env->npc = env->tsptr->tnpc;
2667 PUT_CCR(env, env->tsptr->tstate >> 32);
2668 env->asi = (env->tsptr->tstate >> 24) & 0xff;
2669 change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
2670 PUT_CWP64(env, env->tsptr->tstate & 0xff);
2671 env->tl--;
2672 env->tsptr = &env->ts[env->tl & MAXTL_MASK];
2674 #endif
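/* FLUSH: invalidate any translated code for the doubleword containing
   addr so self-modifying code is retranslated. */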
2676 void helper_flush(target_ulong addr)
2678 addr &= ~7;
2679 tb_invalidate_page_range(addr, addr + 8);
2682 #ifdef TARGET_SPARC64
2683 #ifdef DEBUG_PCALL
2684 static const char * const excp_names[0x80] = {
2685 [TT_TFAULT] = "Instruction Access Fault",
2686 [TT_TMISS] = "Instruction Access MMU Miss",
2687 [TT_CODE_ACCESS] = "Instruction Access Error",
2688 [TT_ILL_INSN] = "Illegal Instruction",
2689 [TT_PRIV_INSN] = "Privileged Instruction",
2690 [TT_NFPU_INSN] = "FPU Disabled",
2691 [TT_FP_EXCP] = "FPU Exception",
2692 [TT_TOVF] = "Tag Overflow",
2693 [TT_CLRWIN] = "Clean Windows",
2694 [TT_DIV_ZERO] = "Division By Zero",
2695 [TT_DFAULT] = "Data Access Fault",
2696 [TT_DMISS] = "Data Access MMU Miss",
2697 [TT_DATA_ACCESS] = "Data Access Error",
2698 [TT_DPROT] = "Data Protection Error",
2699 [TT_UNALIGNED] = "Unaligned Memory Access",
2700 [TT_PRIV_ACT] = "Privileged Action",
2701 [TT_EXTINT | 0x1] = "External Interrupt 1",
2702 [TT_EXTINT | 0x2] = "External Interrupt 2",
2703 [TT_EXTINT | 0x3] = "External Interrupt 3",
2704 [TT_EXTINT | 0x4] = "External Interrupt 4",
2705 [TT_EXTINT | 0x5] = "External Interrupt 5",
2706 [TT_EXTINT | 0x6] = "External Interrupt 6",
2707 [TT_EXTINT | 0x7] = "External Interrupt 7",
2708 [TT_EXTINT | 0x8] = "External Interrupt 8",
2709 [TT_EXTINT | 0x9] = "External Interrupt 9",
2710 [TT_EXTINT | 0xa] = "External Interrupt 10",
2711 [TT_EXTINT | 0xb] = "External Interrupt 11",
2712 [TT_EXTINT | 0xc] = "External Interrupt 12",
2713 [TT_EXTINT | 0xd] = "External Interrupt 13",
2714 [TT_EXTINT | 0xe] = "External Interrupt 14",
2715 [TT_EXTINT | 0xf] = "External Interrupt 15",
2717 #endif
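/* V9 trap entry: push PC/nPC, CCR, ASI, PSTATE and CWP onto the trap
   stack, select the per-trap-type global register set, adjust CWP for
   clean_window/spill/fill traps, and vector through TBR.  Trapping at
   MAXTL aborts with an error-state message; at MAXTL-1 the CPU enters
   RED state. */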
2719 void do_interrupt(CPUState *env)
2721 int intno = env->exception_index;
2723 #ifdef DEBUG_PCALL
2724 if (loglevel & CPU_LOG_INT) {
2725 static int count;
2726 const char *name;
2728 if (intno < 0 || intno >= 0x180)
2729 name = "Unknown";
2730 else if (intno >= 0x100)
2731 name = "Trap Instruction";
2732 else if (intno >= 0xc0)
2733 name = "Window Fill";
2734 else if (intno >= 0x80)
2735 name = "Window Spill";
2736 else {
2737 name = excp_names[intno];
2738 if (!name)
2739 name = "Unknown";
2742 fprintf(logfile, "%6d: %s (v=%04x) pc=%016" PRIx64 " npc=%016" PRIx64
2743 " SP=%016" PRIx64 "\n",
2744 count, name, intno,
2745 env->pc,
2746 env->npc, env->regwptr[6]);
2747 cpu_dump_state(env, logfile, fprintf, 0);
2748 #if 0
2750 int i;
2751 uint8_t *ptr;
2753 fprintf(logfile, " code=");
2754 ptr = (uint8_t *)env->pc;
2755 for(i = 0; i < 16; i++) {
2756 fprintf(logfile, " %02x", ldub(ptr + i));
2758 fprintf(logfile, "\n");
2760 #endif
2761 count++;
2763 #endif
2764 #if !defined(CONFIG_USER_ONLY)
2765 if (env->tl >= env->maxtl) {
2766 cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
2767 " Error state", env->exception_index, env->tl, env->maxtl);
2768 return;
2770 #endif
2771 if (env->tl < env->maxtl - 1) {
2772 env->tl++;
2773 } else {
2774 env->pstate |= PS_RED;
2775 if (env->tl < env->maxtl)
2776 env->tl++;
2778 env->tsptr = &env->ts[env->tl & MAXTL_MASK];
2779 env->tsptr->tstate = ((uint64_t)GET_CCR(env) << 32) |
2780 ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
2781 GET_CWP64(env);
2782 env->tsptr->tpc = env->pc;
2783 env->tsptr->tnpc = env->npc;
2784 env->tsptr->tt = intno;
2785 if (!(env->def->features & CPU_FEATURE_GL)) {
2786 switch (intno) {
2787 case TT_IVEC:
2788 change_pstate(PS_PEF | PS_PRIV | PS_IG);
2789 break;
2790 case TT_TFAULT:
2791 case TT_TMISS:
2792 case TT_DFAULT:
2793 case TT_DMISS:
2794 case TT_DPROT:
2795 change_pstate(PS_PEF | PS_PRIV | PS_MG);
2796 break;
2797 default:
2798 change_pstate(PS_PEF | PS_PRIV | PS_AG);
2799 break;
2802 if (intno == TT_CLRWIN)
2803 cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1));
2804 else if ((intno & 0x1c0) == TT_SPILL)
2805 cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2));
2806 else if ((intno & 0x1c0) == TT_FILL)
2807 cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1));
2808 env->tbr &= ~0x7fffULL;
2809 env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
2810 env->pc = env->tbr;
2811 env->npc = env->pc + 4;
2812 env->exception_index = 0;
2814 #else
2815 #ifdef DEBUG_PCALL
2816 static const char * const excp_names[0x80] = {
2817 [TT_TFAULT] = "Instruction Access Fault",
2818 [TT_ILL_INSN] = "Illegal Instruction",
2819 [TT_PRIV_INSN] = "Privileged Instruction",
2820 [TT_NFPU_INSN] = "FPU Disabled",
2821 [TT_WIN_OVF] = "Window Overflow",
2822 [TT_WIN_UNF] = "Window Underflow",
2823 [TT_UNALIGNED] = "Unaligned Memory Access",
2824 [TT_FP_EXCP] = "FPU Exception",
2825 [TT_DFAULT] = "Data Access Fault",
2826 [TT_TOVF] = "Tag Overflow",
2827 [TT_EXTINT | 0x1] = "External Interrupt 1",
2828 [TT_EXTINT | 0x2] = "External Interrupt 2",
2829 [TT_EXTINT | 0x3] = "External Interrupt 3",
2830 [TT_EXTINT | 0x4] = "External Interrupt 4",
2831 [TT_EXTINT | 0x5] = "External Interrupt 5",
2832 [TT_EXTINT | 0x6] = "External Interrupt 6",
2833 [TT_EXTINT | 0x7] = "External Interrupt 7",
2834 [TT_EXTINT | 0x8] = "External Interrupt 8",
2835 [TT_EXTINT | 0x9] = "External Interrupt 9",
2836 [TT_EXTINT | 0xa] = "External Interrupt 10",
2837 [TT_EXTINT | 0xb] = "External Interrupt 11",
2838 [TT_EXTINT | 0xc] = "External Interrupt 12",
2839 [TT_EXTINT | 0xd] = "External Interrupt 13",
2840 [TT_EXTINT | 0xe] = "External Interrupt 14",
2841 [TT_EXTINT | 0xf] = "External Interrupt 15",
2843 [TT_CODE_ACCESS] = "Instruction Access Error",
2844 [TT_DATA_ACCESS] = "Data Access Error",
2845 [TT_DIV_ZERO] = "Division By Zero",
2846 [TT_NCP_INSN] = "Coprocessor Disabled",
2848 #endif
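/* SPARCv8 trap entry: disable traps (ET = 0), move to a fresh register
   window, save PC/nPC in %l1/%l2 (regwptr[9]/[10]), copy S into PS and set
   S, then vector through TBR with the trap type in bits 11:4. */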
2850 void do_interrupt(CPUState *env)
2852 int cwp, intno = env->exception_index;
2854 #ifdef DEBUG_PCALL
2855 if (loglevel & CPU_LOG_INT) {
2856 static int count;
2857 const char *name;
2859 if (intno < 0 || intno >= 0x100)
2860 name = "Unknown";
2861 else if (intno >= 0x80)
2862 name = "Trap Instruction";
2863 else {
2864 name = excp_names[intno];
2865 if (!name)
2866 name = "Unknown";
2869 fprintf(logfile, "%6d: %s (v=%02x) pc=%08x npc=%08x SP=%08x\n",
2870 count, name, intno,
2871 env->pc,
2872 env->npc, env->regwptr[6]);
2873 cpu_dump_state(env, logfile, fprintf, 0);
2874 #if 0
2876 int i;
2877 uint8_t *ptr;
2879 fprintf(logfile, " code=");
2880 ptr = (uint8_t *)env->pc;
2881 for(i = 0; i < 16; i++) {
2882 fprintf(logfile, " %02x", ldub(ptr + i));
2884 fprintf(logfile, "\n");
2886 #endif
2887 count++;
2889 #endif
2890 #if !defined(CONFIG_USER_ONLY)
2891 if (env->psret == 0) {
2892 cpu_abort(env, "Trap 0x%02x while interrupts disabled, Error state",
2893 env->exception_index);
2894 return;
2896 #endif
2897 env->psret = 0;
2898 cwp = cpu_cwp_dec(env, env->cwp - 1);
2899 cpu_set_cwp(env, cwp);
2900 env->regwptr[9] = env->pc;
2901 env->regwptr[10] = env->npc;
2902 env->psrps = env->psrs;
2903 env->psrs = 1;
2904 env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
2905 env->pc = env->tbr;
2906 env->npc = env->pc + 4;
2907 env->exception_index = 0;
2909 #endif
2911 #if !defined(CONFIG_USER_ONLY)
2913 static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
2914 void *retaddr);
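/* Instantiate the softmmu slow-path load/store helpers for 1, 2, 4 and
   8 byte accesses; ALIGNED_ONLY makes them report misaligned addresses
   through do_unaligned_access(). */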
2916 #define MMUSUFFIX _mmu
2917 #define ALIGNED_ONLY
2919 #define SHIFT 0
2920 #include "softmmu_template.h"
2922 #define SHIFT 1
2923 #include "softmmu_template.h"
2925 #define SHIFT 2
2926 #include "softmmu_template.h"
2928 #define SHIFT 3
2929 #include "softmmu_template.h"
2931 /* XXX: make it generic ? */
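/* Map a host return address back to the faulting guest instruction and
   resynchronize the CPU state before the exception is delivered;
   retaddr == NULL means the fault did not come from generated code. */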
2932 static void cpu_restore_state2(void *retaddr)
2934 TranslationBlock *tb;
2935 unsigned long pc;
2937 if (retaddr) {
2938 /* now we have a real cpu fault */
2939 pc = (unsigned long)retaddr;
2940 tb = tb_find_pc(pc);
2941 if (tb) {
2942 /* the PC is inside the translated code. It means that we have
2943 a virtual CPU fault */
2944 cpu_restore_state(tb, env, pc, (void *)(long)env->cond);
2949 static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
2950 void *retaddr)
2952 #ifdef DEBUG_UNALIGNED
2953 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
2954 "\n", addr, env->pc);
2955 #endif
2956 cpu_restore_state2(retaddr);
2957 raise_exception(TT_UNALIGNED);
2960 /* Try to fill the TLB and raise an exception on error.  If retaddr is
2961    NULL, the function was called from C code (i.e. not from generated
2962    code or from helper.c). */
2963 /* XXX: fix it to restore all registers */
2964 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2966 int ret;
2967 CPUState *saved_env;
2969 /* XXX: hack to restore env in all cases, even if not called from
2970 generated code */
2971 saved_env = env;
2972 env = cpu_single_env;
2974 ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
2975 if (ret) {
2976 cpu_restore_state2(retaddr);
2977 cpu_loop_exit();
2979 env = saved_env;
2982 #endif
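/* Unassigned (bus error) accesses.  The SPARCv8 version below latches the
   fault in the reference MMU fault status/address registers
   (mmuregs[3]/[4]) and raises a code or data access error unless the MMU
   is disabled or in no-fault mode; the V9 version only raises the
   exception. */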
2984 #ifndef TARGET_SPARC64
2985 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
2986 int is_asi)
2988 CPUState *saved_env;
2990 /* XXX: hack to restore env in all cases, even if not called from
2991 generated code */
2992 saved_env = env;
2993 env = cpu_single_env;
2994 #ifdef DEBUG_UNASSIGNED
2995 if (is_asi)
2996 printf("Unassigned mem %s access to " TARGET_FMT_plx
2997 " asi 0x%02x from " TARGET_FMT_lx "\n",
2998 is_exec ? "exec" : is_write ? "write" : "read", addr, is_asi,
2999 env->pc);
3000 else
3001 printf("Unassigned mem %s access to " TARGET_FMT_plx " from "
3002 TARGET_FMT_lx "\n",
3003 is_exec ? "exec" : is_write ? "write" : "read", addr, env->pc);
3004 #endif
3005 if (env->mmuregs[3]) /* Fault status register */
3006 env->mmuregs[3] = 1; /* overflow (not read before another fault) */
3007 if (is_asi)
3008 env->mmuregs[3] |= 1 << 16;
3009 if (env->psrs)
3010 env->mmuregs[3] |= 1 << 5;
3011 if (is_exec)
3012 env->mmuregs[3] |= 1 << 6;
3013 if (is_write)
3014 env->mmuregs[3] |= 1 << 7;
3015 env->mmuregs[3] |= (5 << 2) | 2;
3016 env->mmuregs[4] = addr; /* Fault address register */
3017 if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
3018 if (is_exec)
3019 raise_exception(TT_CODE_ACCESS);
3020 else
3021 raise_exception(TT_DATA_ACCESS);
3023 env = saved_env;
3025 #else
3026 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
3027 int is_asi)
3029 #ifdef DEBUG_UNASSIGNED
3030 CPUState *saved_env;
3032 /* XXX: hack to restore env in all cases, even if not called from
3033 generated code */
3034 saved_env = env;
3035 env = cpu_single_env;
3036 printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
3037 "\n", addr, env->pc);
3038 env = saved_env;
3039 #endif
3040 if (is_exec)
3041 raise_exception(TT_CODE_ACCESS);
3042 else
3043 raise_exception(TT_DATA_ACCESS);
3045 #endif