target-sparc/op_helper.c (qemu-kvm/fedora.git, blob 947a55ad15b7b50100d6162bd9cee1a3a06cc286)
1 #include "exec.h"
2 #include "host-utils.h"
3 #include "helper.h"
4 #if !defined(CONFIG_USER_ONLY)
5 #include "softmmu_exec.h"
6 #endif /* !defined(CONFIG_USER_ONLY) */
8 //#define DEBUG_MMU
9 //#define DEBUG_MXCC
10 //#define DEBUG_UNALIGNED
11 //#define DEBUG_UNASSIGNED
12 //#define DEBUG_ASI
13 //#define DEBUG_PCALL
15 #ifdef DEBUG_MMU
16 #define DPRINTF_MMU(fmt, args...) \
17 do { printf("MMU: " fmt , ##args); } while (0)
18 #else
19 #define DPRINTF_MMU(fmt, args...) do {} while (0)
20 #endif
22 #ifdef DEBUG_MXCC
23 #define DPRINTF_MXCC(fmt, args...) \
24 do { printf("MXCC: " fmt , ##args); } while (0)
25 #else
26 #define DPRINTF_MXCC(fmt, args...) do {} while (0)
27 #endif
29 #ifdef DEBUG_ASI
30 #define DPRINTF_ASI(fmt, args...) \
31 do { printf("ASI: " fmt , ##args); } while (0)
32 #endif
34 #ifdef TARGET_SPARC64
35 #ifndef TARGET_ABI32
36 #define AM_CHECK(env1) ((env1)->pstate & PS_AM)
37 #else
38 #define AM_CHECK(env1) (1)
39 #endif
40 #endif
42 static inline void address_mask(CPUState *env1, target_ulong *addr)
44 #ifdef TARGET_SPARC64
45 if (AM_CHECK(env1))
46 *addr &= 0xffffffffULL;
47 #endif
50 static void raise_exception(int tt)
52 env->exception_index = tt;
53 cpu_loop_exit();
56 void HELPER(raise_exception)(int tt)
58 raise_exception(tt);
61 static inline void set_cwp(int new_cwp)
63 cpu_set_cwp(env, new_cwp);
66 void helper_check_align(target_ulong addr, uint32_t align)
68 if (addr & align) {
69 #ifdef DEBUG_UNALIGNED
70 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
71 "\n", addr, env->pc);
72 #endif
73 raise_exception(TT_UNALIGNED);
77 #define F_HELPER(name, p) void helper_f##name##p(void)
79 #define F_BINOP(name) \
80 float32 helper_f ## name ## s (float32 src1, float32 src2) \
81 { \
82 return float32_ ## name (src1, src2, &env->fp_status); \
83 } \
84 F_HELPER(name, d) \
85 { \
86 DT0 = float64_ ## name (DT0, DT1, &env->fp_status); \
87 } \
88 F_HELPER(name, q) \
89 { \
90 QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \
93 F_BINOP(add);
94 F_BINOP(sub);
95 F_BINOP(mul);
96 F_BINOP(div);
97 #undef F_BINOP
99 void helper_fsmuld(float32 src1, float32 src2)
101 DT0 = float64_mul(float32_to_float64(src1, &env->fp_status),
102 float32_to_float64(src2, &env->fp_status),
103 &env->fp_status);
106 void helper_fdmulq(void)
108 QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
109 float64_to_float128(DT1, &env->fp_status),
110 &env->fp_status);
113 float32 helper_fnegs(float32 src)
115 return float32_chs(src);
118 #ifdef TARGET_SPARC64
119 F_HELPER(neg, d)
121 DT0 = float64_chs(DT1);
124 F_HELPER(neg, q)
126 QT0 = float128_chs(QT1);
128 #endif
130 /* Integer to float conversion. */
131 float32 helper_fitos(int32_t src)
133 return int32_to_float32(src, &env->fp_status);
136 void helper_fitod(int32_t src)
138 DT0 = int32_to_float64(src, &env->fp_status);
141 void helper_fitoq(int32_t src)
143 QT0 = int32_to_float128(src, &env->fp_status);
146 #ifdef TARGET_SPARC64
147 float32 helper_fxtos(void)
149 return int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
152 F_HELPER(xto, d)
154 DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
157 F_HELPER(xto, q)
159 QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
161 #endif
162 #undef F_HELPER
164 /* floating point conversion */
165 float32 helper_fdtos(void)
167 return float64_to_float32(DT1, &env->fp_status);
170 void helper_fstod(float32 src)
172 DT0 = float32_to_float64(src, &env->fp_status);
175 float32 helper_fqtos(void)
177 return float128_to_float32(QT1, &env->fp_status);
180 void helper_fstoq(float32 src)
182 QT0 = float32_to_float128(src, &env->fp_status);
185 void helper_fqtod(void)
187 DT0 = float128_to_float64(QT1, &env->fp_status);
190 void helper_fdtoq(void)
192 QT0 = float64_to_float128(DT1, &env->fp_status);
195 /* Float to integer conversion. */
196 int32_t helper_fstoi(float32 src)
198 return float32_to_int32_round_to_zero(src, &env->fp_status);
201 int32_t helper_fdtoi(void)
203 return float64_to_int32_round_to_zero(DT1, &env->fp_status);
206 int32_t helper_fqtoi(void)
208 return float128_to_int32_round_to_zero(QT1, &env->fp_status);
211 #ifdef TARGET_SPARC64
212 void helper_fstox(float32 src)
214 *((int64_t *)&DT0) = float32_to_int64_round_to_zero(src, &env->fp_status);
217 void helper_fdtox(void)
219 *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
222 void helper_fqtox(void)
224 *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
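/* faligndata: concatenate the two 64-bit inputs (DT0 high, DT1 low) and
 * extract eight bytes starting at the byte offset held in GSR.align (the
 * low three bits of %gsr), as previously set up by alignaddr. */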
227 void helper_faligndata(void)
229 uint64_t tmp;
231 tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
232 /* on many architectures a shift of 64 does nothing */
233 if ((env->gsr & 7) != 0) {
234 tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
236 *((uint64_t *)&DT0) = tmp;
239 #ifdef WORDS_BIGENDIAN
240 #define VIS_B64(n) b[7 - (n)]
241 #define VIS_W64(n) w[3 - (n)]
242 #define VIS_SW64(n) sw[3 - (n)]
243 #define VIS_L64(n) l[1 - (n)]
244 #define VIS_B32(n) b[3 - (n)]
245 #define VIS_W32(n) w[1 - (n)]
246 #else
247 #define VIS_B64(n) b[n]
248 #define VIS_W64(n) w[n]
249 #define VIS_SW64(n) sw[n]
250 #define VIS_L64(n) l[n]
251 #define VIS_B32(n) b[n]
252 #define VIS_W32(n) w[n]
253 #endif
255 typedef union {
256 uint8_t b[8];
257 uint16_t w[4];
258 int16_t sw[4];
259 uint32_t l[2];
260 float64 d;
261 } vis64;
263 typedef union {
264 uint8_t b[4];
265 uint16_t w[2];
266 uint32_t l;
267 float32 f;
268 } vis32;
270 void helper_fpmerge(void)
272 vis64 s, d;
274 s.d = DT0;
275 d.d = DT1;
277 // Reverse calculation order to handle overlap
278 d.VIS_B64(7) = s.VIS_B64(3);
279 d.VIS_B64(6) = d.VIS_B64(3);
280 d.VIS_B64(5) = s.VIS_B64(2);
281 d.VIS_B64(4) = d.VIS_B64(2);
282 d.VIS_B64(3) = s.VIS_B64(1);
283 d.VIS_B64(2) = d.VIS_B64(1);
284 d.VIS_B64(1) = s.VIS_B64(0);
285 //d.VIS_B64(0) = d.VIS_B64(0);
287 DT0 = d.d;
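/* The fmul8x16 family multiplies unsigned 8-bit components of one operand
 * by signed 16-bit components of the other; PMUL rounds the 24-bit product
 * to 16 bits, rounding the discarded low byte half-up. */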
290 void helper_fmul8x16(void)
292 vis64 s, d;
293 uint32_t tmp;
295 s.d = DT0;
296 d.d = DT1;
298 #define PMUL(r) \
299 tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \
300 if ((tmp & 0xff) > 0x7f) \
301 tmp += 0x100; \
302 d.VIS_W64(r) = tmp >> 8;
304 PMUL(0);
305 PMUL(1);
306 PMUL(2);
307 PMUL(3);
308 #undef PMUL
310 DT0 = d.d;
313 void helper_fmul8x16al(void)
315 vis64 s, d;
316 uint32_t tmp;
318 s.d = DT0;
319 d.d = DT1;
321 #define PMUL(r) \
322 tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \
323 if ((tmp & 0xff) > 0x7f) \
324 tmp += 0x100; \
325 d.VIS_W64(r) = tmp >> 8;
327 PMUL(0);
328 PMUL(1);
329 PMUL(2);
330 PMUL(3);
331 #undef PMUL
333 DT0 = d.d;
336 void helper_fmul8x16au(void)
338 vis64 s, d;
339 uint32_t tmp;
341 s.d = DT0;
342 d.d = DT1;
344 #define PMUL(r) \
345 tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \
346 if ((tmp & 0xff) > 0x7f) \
347 tmp += 0x100; \
348 d.VIS_W64(r) = tmp >> 8;
350 PMUL(0);
351 PMUL(1);
352 PMUL(2);
353 PMUL(3);
354 #undef PMUL
356 DT0 = d.d;
359 void helper_fmul8sux16(void)
361 vis64 s, d;
362 uint32_t tmp;
364 s.d = DT0;
365 d.d = DT1;
367 #define PMUL(r) \
368 tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
369 if ((tmp & 0xff) > 0x7f) \
370 tmp += 0x100; \
371 d.VIS_W64(r) = tmp >> 8;
373 PMUL(0);
374 PMUL(1);
375 PMUL(2);
376 PMUL(3);
377 #undef PMUL
379 DT0 = d.d;
382 void helper_fmul8ulx16(void)
384 vis64 s, d;
385 uint32_t tmp;
387 s.d = DT0;
388 d.d = DT1;
390 #define PMUL(r) \
391 tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
392 if ((tmp & 0xff) > 0x7f) \
393 tmp += 0x100; \
394 d.VIS_W64(r) = tmp >> 8;
396 PMUL(0);
397 PMUL(1);
398 PMUL(2);
399 PMUL(3);
400 #undef PMUL
402 DT0 = d.d;
405 void helper_fmuld8sux16(void)
407 vis64 s, d;
408 uint32_t tmp;
410 s.d = DT0;
411 d.d = DT1;
413 #define PMUL(r) \
414 tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
415 if ((tmp & 0xff) > 0x7f) \
416 tmp += 0x100; \
417 d.VIS_L64(r) = tmp;
419 // Reverse calculation order to handle overlap
420 PMUL(1);
421 PMUL(0);
422 #undef PMUL
424 DT0 = d.d;
427 void helper_fmuld8ulx16(void)
429 vis64 s, d;
430 uint32_t tmp;
432 s.d = DT0;
433 d.d = DT1;
435 #define PMUL(r) \
436 tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
437 if ((tmp & 0xff) > 0x7f) \
438 tmp += 0x100; \
439 d.VIS_L64(r) = tmp;
441 // Reverse calculation order to handle overlap
442 PMUL(1);
443 PMUL(0);
444 #undef PMUL
446 DT0 = d.d;
449 void helper_fexpand(void)
451 vis32 s;
452 vis64 d;
454 s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
455 d.d = DT1;
456 d.VIS_W64(0) = s.VIS_B32(0) << 4;
457 d.VIS_W64(1) = s.VIS_B32(1) << 4;
458 d.VIS_W64(2) = s.VIS_B32(2) << 4;
459 d.VIS_W64(3) = s.VIS_B32(3) << 4;
461 DT0 = d.d;
464 #define VIS_HELPER(name, F) \
465 void name##16(void) \
467 vis64 s, d; \
469 s.d = DT0; \
470 d.d = DT1; \
472 d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
473 d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
474 d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
475 d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
477 DT0 = d.d; \
480 uint32_t name##16s(uint32_t src1, uint32_t src2) \
482 vis32 s, d; \
484 s.l = src1; \
485 d.l = src2; \
487 d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
488 d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
490 return d.l; \
493 void name##32(void) \
495 vis64 s, d; \
497 s.d = DT0; \
498 d.d = DT1; \
500 d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
501 d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
503 DT0 = d.d; \
506 uint32_t name##32s(uint32_t src1, uint32_t src2) \
508 vis32 s, d; \
510 s.l = src1; \
511 d.l = src2; \
513 d.l = F(d.l, s.l); \
515 return d.l; \
518 #define FADD(a, b) ((a) + (b))
519 #define FSUB(a, b) ((a) - (b))
520 VIS_HELPER(helper_fpadd, FADD)
521 VIS_HELPER(helper_fpsub, FSUB)
523 #define VIS_CMPHELPER(name, F) \
524 void name##16(void) \
526 vis64 s, d; \
528 s.d = DT0; \
529 d.d = DT1; \
531 d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0; \
532 d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0; \
533 d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0; \
534 d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0; \
536 DT0 = d.d; \
539 void name##32(void) \
541 vis64 s, d; \
543 s.d = DT0; \
544 d.d = DT1; \
546 d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0; \
547 d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0; \
549 DT0 = d.d; \
552 #define FCMPGT(a, b) ((a) > (b))
553 #define FCMPEQ(a, b) ((a) == (b))
554 #define FCMPLE(a, b) ((a) <= (b))
555 #define FCMPNE(a, b) ((a) != (b))
557 VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
558 VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
559 VIS_CMPHELPER(helper_fcmple, FCMPLE)
560 VIS_CMPHELPER(helper_fcmpne, FCMPNE)
561 #endif
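/* Fold the softfloat exception flags into the FSR: the matching cexc bits
 * are set, and if any of them is enabled in the trap-enable mask (TEM) an
 * fp_exception trap is raised; otherwise the bits are accumulated into the
 * aexc field (cexc shifted left by 5). */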
563 void helper_check_ieee_exceptions(void)
565 target_ulong status;
567 status = get_float_exception_flags(&env->fp_status);
568 if (status) {
569 /* Copy IEEE 754 flags into FSR */
570 if (status & float_flag_invalid)
571 env->fsr |= FSR_NVC;
572 if (status & float_flag_overflow)
573 env->fsr |= FSR_OFC;
574 if (status & float_flag_underflow)
575 env->fsr |= FSR_UFC;
576 if (status & float_flag_divbyzero)
577 env->fsr |= FSR_DZC;
578 if (status & float_flag_inexact)
579 env->fsr |= FSR_NXC;
581 if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
582 /* Unmasked exception, generate a trap */
583 env->fsr |= FSR_FTT_IEEE_EXCP;
584 raise_exception(TT_FP_EXCP);
585 } else {
586 /* Accumulate exceptions */
587 env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
592 void helper_clear_float_exceptions(void)
594 set_float_exception_flags(0, &env->fp_status);
597 float32 helper_fabss(float32 src)
599 return float32_abs(src);
602 #ifdef TARGET_SPARC64
603 void helper_fabsd(void)
605 DT0 = float64_abs(DT1);
608 void helper_fabsq(void)
610 QT0 = float128_abs(QT1);
612 #endif
614 float32 helper_fsqrts(float32 src)
616 return float32_sqrt(src, &env->fp_status);
619 void helper_fsqrtd(void)
621 DT0 = float64_sqrt(DT1, &env->fp_status);
624 void helper_fsqrtq(void)
626 QT0 = float128_sqrt(QT1, &env->fp_status);
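/* GEN_FCMP and GEN_FCMPS generate the fcmp and fcmpe helpers: the
 * comparison result is written to the FCCn field at bit offset FS of the
 * FSR, and an unordered result (NaN operand) either raises an fp_exception
 * trap (for the fcmpe variants, or when the NV trap is enabled) or just
 * sets the accrued invalid bit. */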
629 #define GEN_FCMP(name, size, reg1, reg2, FS, TRAP) \
630 void glue(helper_, name) (void) \
632 target_ulong new_fsr; \
634 env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
635 switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) { \
636 case float_relation_unordered: \
637 new_fsr = (FSR_FCC1 | FSR_FCC0) << FS; \
638 if ((env->fsr & FSR_NVM) || TRAP) { \
639 env->fsr |= new_fsr; \
640 env->fsr |= FSR_NVC; \
641 env->fsr |= FSR_FTT_IEEE_EXCP; \
642 raise_exception(TT_FP_EXCP); \
643 } else { \
644 env->fsr |= FSR_NVA; \
646 break; \
647 case float_relation_less: \
648 new_fsr = FSR_FCC0 << FS; \
649 break; \
650 case float_relation_greater: \
651 new_fsr = FSR_FCC1 << FS; \
652 break; \
653 default: \
654 new_fsr = 0; \
655 break; \
657 env->fsr |= new_fsr; \
659 #define GEN_FCMPS(name, size, FS, TRAP) \
660 void glue(helper_, name)(float32 src1, float32 src2) \
662 target_ulong new_fsr; \
664 env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
665 switch (glue(size, _compare) (src1, src2, &env->fp_status)) { \
666 case float_relation_unordered: \
667 new_fsr = (FSR_FCC1 | FSR_FCC0) << FS; \
668 if ((env->fsr & FSR_NVM) || TRAP) { \
669 env->fsr |= new_fsr; \
670 env->fsr |= FSR_NVC; \
671 env->fsr |= FSR_FTT_IEEE_EXCP; \
672 raise_exception(TT_FP_EXCP); \
673 } else { \
674 env->fsr |= FSR_NVA; \
676 break; \
677 case float_relation_less: \
678 new_fsr = FSR_FCC0 << FS; \
679 break; \
680 case float_relation_greater: \
681 new_fsr = FSR_FCC1 << FS; \
682 break; \
683 default: \
684 new_fsr = 0; \
685 break; \
687 env->fsr |= new_fsr; \
690 GEN_FCMPS(fcmps, float32, 0, 0);
691 GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);
693 GEN_FCMPS(fcmpes, float32, 0, 1);
694 GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);
696 GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
697 GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
699 #ifdef TARGET_SPARC64
700 GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
701 GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
702 GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
704 GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
705 GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
706 GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
708 GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
709 GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
710 GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
712 GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
713 GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
714 GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
716 GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
717 GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
718 GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
720 GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
721 GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
722 GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
723 #endif
724 #undef GEN_FCMPS
726 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
727 defined(DEBUG_MXCC)
728 static void dump_mxcc(CPUState *env)
730 printf("mxccdata: %016llx %016llx %016llx %016llx\n",
731 env->mxccdata[0], env->mxccdata[1],
732 env->mxccdata[2], env->mxccdata[3]);
733 printf("mxccregs: %016llx %016llx %016llx %016llx\n"
734 " %016llx %016llx %016llx %016llx\n",
735 env->mxccregs[0], env->mxccregs[1],
736 env->mxccregs[2], env->mxccregs[3],
737 env->mxccregs[4], env->mxccregs[5],
738 env->mxccregs[6], env->mxccregs[7]);
740 #endif
742 #if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
743 && defined(DEBUG_ASI)
744 static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
745 uint64_t r1)
747 switch (size)
749 case 1:
750 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
751 addr, asi, r1 & 0xff);
752 break;
753 case 2:
754 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
755 addr, asi, r1 & 0xffff);
756 break;
757 case 4:
758 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
759 addr, asi, r1 & 0xffffffff);
760 break;
761 case 8:
762 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
763 addr, asi, r1);
764 break;
767 #endif
769 #ifndef TARGET_SPARC64
770 #ifndef CONFIG_USER_ONLY
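/* sparc32 alternate-space loads and stores: the ASI selects the target,
 * e.g. SuperSPARC MXCC registers (2), MMU probe/flush (3), MMU registers
 * (4), user/supervisor data (0xa/0xb), cache diagnostics, or physical
 * memory via the MMU passthrough ASIs (0x20-0x2f). */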
771 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
773 uint64_t ret = 0;
774 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
775 uint32_t last_addr = addr;
776 #endif
778 helper_check_align(addr, size - 1);
779 switch (asi) {
780 case 2: /* SuperSparc MXCC registers */
781 switch (addr) {
782 case 0x01c00a00: /* MXCC control register */
783 if (size == 8)
784 ret = env->mxccregs[3];
785 else
786 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
787 size);
788 break;
789 case 0x01c00a04: /* MXCC control register */
790 if (size == 4)
791 ret = env->mxccregs[3];
792 else
793 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
794 size);
795 break;
796 case 0x01c00c00: /* Module reset register */
797 if (size == 8) {
798 ret = env->mxccregs[5];
799 // should we do something here?
800 } else
801 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
802 size);
803 break;
804 case 0x01c00f00: /* MBus port address register */
805 if (size == 8)
806 ret = env->mxccregs[7];
807 else
808 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
809 size);
810 break;
811 default:
812 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
813 size);
814 break;
816 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
817 "addr = %08x -> ret = %" PRIx64 ","
818 "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
819 #ifdef DEBUG_MXCC
820 dump_mxcc(env);
821 #endif
822 break;
823 case 3: /* MMU probe */
825 int mmulev;
827 mmulev = (addr >> 8) & 15;
828 if (mmulev > 4)
829 ret = 0;
830 else
831 ret = mmu_probe(env, addr, mmulev);
832 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
833 addr, mmulev, ret);
835 break;
836 case 4: /* read MMU regs */
838 int reg = (addr >> 8) & 0x1f;
840 ret = env->mmuregs[reg];
841 if (reg == 3) /* Fault status cleared on read */
842 env->mmuregs[3] = 0;
843 else if (reg == 0x13) /* Fault status read */
844 ret = env->mmuregs[3];
845 else if (reg == 0x14) /* Fault address read */
846 ret = env->mmuregs[4];
847 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
849 break;
850 case 5: // Turbosparc ITLB Diagnostic
851 case 6: // Turbosparc DTLB Diagnostic
852 case 7: // Turbosparc IOTLB Diagnostic
853 break;
854 case 9: /* Supervisor code access */
855 switch(size) {
856 case 1:
857 ret = ldub_code(addr);
858 break;
859 case 2:
860 ret = lduw_code(addr);
861 break;
862 default:
863 case 4:
864 ret = ldl_code(addr);
865 break;
866 case 8:
867 ret = ldq_code(addr);
868 break;
870 break;
871 case 0xa: /* User data access */
872 switch(size) {
873 case 1:
874 ret = ldub_user(addr);
875 break;
876 case 2:
877 ret = lduw_user(addr);
878 break;
879 default:
880 case 4:
881 ret = ldl_user(addr);
882 break;
883 case 8:
884 ret = ldq_user(addr);
885 break;
887 break;
888 case 0xb: /* Supervisor data access */
889 switch(size) {
890 case 1:
891 ret = ldub_kernel(addr);
892 break;
893 case 2:
894 ret = lduw_kernel(addr);
895 break;
896 default:
897 case 4:
898 ret = ldl_kernel(addr);
899 break;
900 case 8:
901 ret = ldq_kernel(addr);
902 break;
904 break;
905 case 0xc: /* I-cache tag */
906 case 0xd: /* I-cache data */
907 case 0xe: /* D-cache tag */
908 case 0xf: /* D-cache data */
909 break;
910 case 0x20: /* MMU passthrough */
911 switch(size) {
912 case 1:
913 ret = ldub_phys(addr);
914 break;
915 case 2:
916 ret = lduw_phys(addr);
917 break;
918 default:
919 case 4:
920 ret = ldl_phys(addr);
921 break;
922 case 8:
923 ret = ldq_phys(addr);
924 break;
926 break;
927 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
928 switch(size) {
929 case 1:
930 ret = ldub_phys((target_phys_addr_t)addr
931 | ((target_phys_addr_t)(asi & 0xf) << 32));
932 break;
933 case 2:
934 ret = lduw_phys((target_phys_addr_t)addr
935 | ((target_phys_addr_t)(asi & 0xf) << 32));
936 break;
937 default:
938 case 4:
939 ret = ldl_phys((target_phys_addr_t)addr
940 | ((target_phys_addr_t)(asi & 0xf) << 32));
941 break;
942 case 8:
943 ret = ldq_phys((target_phys_addr_t)addr
944 | ((target_phys_addr_t)(asi & 0xf) << 32));
945 break;
947 break;
948 case 0x30: // Turbosparc secondary cache diagnostic
949 case 0x31: // Turbosparc RAM snoop
950 case 0x32: // Turbosparc page table descriptor diagnostic
951 case 0x39: /* data cache diagnostic register */
952 ret = 0;
953 break;
954 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
956 int reg = (addr >> 8) & 3;
958 switch(reg) {
959 case 0: /* Breakpoint Value (Addr) */
960 ret = env->mmubpregs[reg];
961 break;
962 case 1: /* Breakpoint Mask */
963 ret = env->mmubpregs[reg];
964 break;
965 case 2: /* Breakpoint Control */
966 ret = env->mmubpregs[reg];
967 break;
968 case 3: /* Breakpoint Status */
969 ret = env->mmubpregs[reg];
970 env->mmubpregs[reg] = 0ULL;
971 break;
973 DPRINTF_MMU("read breakpoint reg[%d] 0x%016llx\n", reg, ret);
975 break;
976 case 8: /* User code access, XXX */
977 default:
978 do_unassigned_access(addr, 0, 0, asi, size);
979 ret = 0;
980 break;
982 if (sign) {
983 switch(size) {
984 case 1:
985 ret = (int8_t) ret;
986 break;
987 case 2:
988 ret = (int16_t) ret;
989 break;
990 case 4:
991 ret = (int32_t) ret;
992 break;
993 default:
994 break;
997 #ifdef DEBUG_ASI
998 dump_asi("read ", last_addr, asi, size, ret);
999 #endif
1000 return ret;
1003 void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
1005 helper_check_align(addr, size - 1);
1006 switch(asi) {
1007 case 2: /* SuperSparc MXCC registers */
1008 switch (addr) {
1009 case 0x01c00000: /* MXCC stream data register 0 */
1010 if (size == 8)
1011 env->mxccdata[0] = val;
1012 else
1013 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1014 size);
1015 break;
1016 case 0x01c00008: /* MXCC stream data register 1 */
1017 if (size == 8)
1018 env->mxccdata[1] = val;
1019 else
1020 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1021 size);
1022 break;
1023 case 0x01c00010: /* MXCC stream data register 2 */
1024 if (size == 8)
1025 env->mxccdata[2] = val;
1026 else
1027 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1028 size);
1029 break;
1030 case 0x01c00018: /* MXCC stream data register 3 */
1031 if (size == 8)
1032 env->mxccdata[3] = val;
1033 else
1034 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1035 size);
1036 break;
1037 case 0x01c00100: /* MXCC stream source */
1038 if (size == 8)
1039 env->mxccregs[0] = val;
1040 else
1041 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1042 size);
1043 env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1044 0);
1045 env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1046 8);
1047 env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1048 16);
1049 env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1050 24);
1051 break;
1052 case 0x01c00200: /* MXCC stream destination */
1053 if (size == 8)
1054 env->mxccregs[1] = val;
1055 else
1056 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1057 size);
1058 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0,
1059 env->mxccdata[0]);
1060 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8,
1061 env->mxccdata[1]);
1062 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
1063 env->mxccdata[2]);
1064 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
1065 env->mxccdata[3]);
1066 break;
1067 case 0x01c00a00: /* MXCC control register */
1068 if (size == 8)
1069 env->mxccregs[3] = val;
1070 else
1071 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1072 size);
1073 break;
1074 case 0x01c00a04: /* MXCC control register */
1075 if (size == 4)
1076 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
1077 | val;
1078 else
1079 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1080 size);
1081 break;
1082 case 0x01c00e00: /* MXCC error register */
1083 // writing a 1 bit clears the error
1084 if (size == 8)
1085 env->mxccregs[6] &= ~val;
1086 else
1087 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1088 size);
1089 break;
1090 case 0x01c00f00: /* MBus port address register */
1091 if (size == 8)
1092 env->mxccregs[7] = val;
1093 else
1094 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1095 size);
1096 break;
1097 default:
1098 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
1099 size);
1100 break;
1102 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
1103 asi, size, addr, val);
1104 #ifdef DEBUG_MXCC
1105 dump_mxcc(env);
1106 #endif
1107 break;
1108 case 3: /* MMU flush */
1110 int mmulev;
1112 mmulev = (addr >> 8) & 15;
1113 DPRINTF_MMU("mmu flush level %d\n", mmulev);
1114 switch (mmulev) {
1115 case 0: // flush page
1116 tlb_flush_page(env, addr & 0xfffff000);
1117 break;
1118 case 1: // flush segment (256k)
1119 case 2: // flush region (16M)
1120 case 3: // flush context (4G)
1121 case 4: // flush entire
1122 tlb_flush(env, 1);
1123 break;
1124 default:
1125 break;
1127 #ifdef DEBUG_MMU
1128 dump_mmu(env);
1129 #endif
1131 break;
1132 case 4: /* write MMU regs */
1134 int reg = (addr >> 8) & 0x1f;
1135 uint32_t oldreg;
1137 oldreg = env->mmuregs[reg];
1138 switch(reg) {
1139 case 0: // Control Register
1140 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
1141 (val & 0x00ffffff);
1142 // Mappings generated during no-fault mode or MMU
1143 // disabled mode are invalid in normal mode
1144 if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
1145 (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
1146 tlb_flush(env, 1);
1147 break;
1148 case 1: // Context Table Pointer Register
1149 env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
1150 break;
1151 case 2: // Context Register
1152 env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
1153 if (oldreg != env->mmuregs[reg]) {
1154 /* we flush when the MMU context changes because
1155 QEMU has no MMU context support */
1156 tlb_flush(env, 1);
1158 break;
1159 case 3: // Synchronous Fault Status Register with Clear
1160 case 4: // Synchronous Fault Address Register
1161 break;
1162 case 0x10: // TLB Replacement Control Register
1163 env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
1164 break;
1165 case 0x13: // Synchronous Fault Status Register with Read and Clear
1166 env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
1167 break;
1168 case 0x14: // Synchronous Fault Address Register
1169 env->mmuregs[4] = val;
1170 break;
1171 default:
1172 env->mmuregs[reg] = val;
1173 break;
1175 if (oldreg != env->mmuregs[reg]) {
1176 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
1177 reg, oldreg, env->mmuregs[reg]);
1179 #ifdef DEBUG_MMU
1180 dump_mmu(env);
1181 #endif
1183 break;
1184 case 5: // Turbosparc ITLB Diagnostic
1185 case 6: // Turbosparc DTLB Diagnostic
1186 case 7: // Turbosparc IOTLB Diagnostic
1187 break;
1188 case 0xa: /* User data access */
1189 switch(size) {
1190 case 1:
1191 stb_user(addr, val);
1192 break;
1193 case 2:
1194 stw_user(addr, val);
1195 break;
1196 default:
1197 case 4:
1198 stl_user(addr, val);
1199 break;
1200 case 8:
1201 stq_user(addr, val);
1202 break;
1204 break;
1205 case 0xb: /* Supervisor data access */
1206 switch(size) {
1207 case 1:
1208 stb_kernel(addr, val);
1209 break;
1210 case 2:
1211 stw_kernel(addr, val);
1212 break;
1213 default:
1214 case 4:
1215 stl_kernel(addr, val);
1216 break;
1217 case 8:
1218 stq_kernel(addr, val);
1219 break;
1221 break;
1222 case 0xc: /* I-cache tag */
1223 case 0xd: /* I-cache data */
1224 case 0xe: /* D-cache tag */
1225 case 0xf: /* D-cache data */
1226 case 0x10: /* I/D-cache flush page */
1227 case 0x11: /* I/D-cache flush segment */
1228 case 0x12: /* I/D-cache flush region */
1229 case 0x13: /* I/D-cache flush context */
1230 case 0x14: /* I/D-cache flush user */
1231 break;
1232 case 0x17: /* Block copy, sta access */
1234 // val = src
1235 // addr = dst
1236 // copy 32 bytes
1237 unsigned int i;
1238 uint32_t src = val & ~3, dst = addr & ~3, temp;
1240 for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
1241 temp = ldl_kernel(src);
1242 stl_kernel(dst, temp);
1245 break;
1246 case 0x1f: /* Block fill, stda access */
1248 // addr = dst
1249 // fill 32 bytes with val
1250 unsigned int i;
1251 uint32_t dst = addr & 7;
1253 for (i = 0; i < 32; i += 8, dst += 8)
1254 stq_kernel(dst, val);
1256 break;
1257 case 0x20: /* MMU passthrough */
1259 switch(size) {
1260 case 1:
1261 stb_phys(addr, val);
1262 break;
1263 case 2:
1264 stw_phys(addr, val);
1265 break;
1266 case 4:
1267 default:
1268 stl_phys(addr, val);
1269 break;
1270 case 8:
1271 stq_phys(addr, val);
1272 break;
1275 break;
1276 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1278 switch(size) {
1279 case 1:
1280 stb_phys((target_phys_addr_t)addr
1281 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1282 break;
1283 case 2:
1284 stw_phys((target_phys_addr_t)addr
1285 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1286 break;
1287 case 4:
1288 default:
1289 stl_phys((target_phys_addr_t)addr
1290 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1291 break;
1292 case 8:
1293 stq_phys((target_phys_addr_t)addr
1294 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1295 break;
1298 break;
1299 case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
1300 case 0x31: // store buffer data, Ross RT620 I-cache flush or
1301 // Turbosparc snoop RAM
1302 case 0x32: // store buffer control or Turbosparc page table
1303 // descriptor diagnostic
1304 case 0x36: /* I-cache flash clear */
1305 case 0x37: /* D-cache flash clear */
1306 case 0x4c: /* breakpoint action */
1307 break;
1308 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
1310 int reg = (addr >> 8) & 3;
1312 switch(reg) {
1313 case 0: /* Breakpoint Value (Addr) */
1314 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1315 break;
1316 case 1: /* Breakpoint Mask */
1317 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1318 break;
1319 case 2: /* Breakpoint Control */
1320 env->mmubpregs[reg] = (val & 0x7fULL);
1321 break;
1322 case 3: /* Breakpoint Status */
1323 env->mmubpregs[reg] = (val & 0xfULL);
1324 break;
1326 DPRINTF_MMU("write breakpoint reg[%d] 0x%016llx\n", reg,
1327 env->mmuregs[reg]);
1329 break;
1330 case 8: /* User code access, XXX */
1331 case 9: /* Supervisor code access, XXX */
1332 default:
1333 do_unassigned_access(addr, 1, 0, asi, size);
1334 break;
1336 #ifdef DEBUG_ASI
1337 dump_asi("write", addr, asi, size, val);
1338 #endif
1341 #endif /* CONFIG_USER_ONLY */
1342 #else /* TARGET_SPARC64 */
1344 #ifdef CONFIG_USER_ONLY
1345 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1347 uint64_t ret = 0;
1348 #if defined(DEBUG_ASI)
1349 target_ulong last_addr = addr;
1350 #endif
1352 if (asi < 0x80)
1353 raise_exception(TT_PRIV_ACT);
1355 helper_check_align(addr, size - 1);
1356 address_mask(env, &addr);
1358 switch (asi) {
1359 case 0x82: // Primary no-fault
1360 case 0x8a: // Primary no-fault LE
1361 if (page_check_range(addr, size, PAGE_READ) == -1) {
1362 #ifdef DEBUG_ASI
1363 dump_asi("read ", last_addr, asi, size, ret);
1364 #endif
1365 return 0;
1367 // Fall through
1368 case 0x80: // Primary
1369 case 0x88: // Primary LE
1371 switch(size) {
1372 case 1:
1373 ret = ldub_raw(addr);
1374 break;
1375 case 2:
1376 ret = lduw_raw(addr);
1377 break;
1378 case 4:
1379 ret = ldl_raw(addr);
1380 break;
1381 default:
1382 case 8:
1383 ret = ldq_raw(addr);
1384 break;
1387 break;
1388 case 0x83: // Secondary no-fault
1389 case 0x8b: // Secondary no-fault LE
1390 if (page_check_range(addr, size, PAGE_READ) == -1) {
1391 #ifdef DEBUG_ASI
1392 dump_asi("read ", last_addr, asi, size, ret);
1393 #endif
1394 return 0;
1396 // Fall through
1397 case 0x81: // Secondary
1398 case 0x89: // Secondary LE
1399 // XXX
1400 break;
1401 default:
1402 break;
1405 /* Convert from little endian */
1406 switch (asi) {
1407 case 0x88: // Primary LE
1408 case 0x89: // Secondary LE
1409 case 0x8a: // Primary no-fault LE
1410 case 0x8b: // Secondary no-fault LE
1411 switch(size) {
1412 case 2:
1413 ret = bswap16(ret);
1414 break;
1415 case 4:
1416 ret = bswap32(ret);
1417 break;
1418 case 8:
1419 ret = bswap64(ret);
1420 break;
1421 default:
1422 break;
1424 default:
1425 break;
1428 /* Convert to signed number */
1429 if (sign) {
1430 switch(size) {
1431 case 1:
1432 ret = (int8_t) ret;
1433 break;
1434 case 2:
1435 ret = (int16_t) ret;
1436 break;
1437 case 4:
1438 ret = (int32_t) ret;
1439 break;
1440 default:
1441 break;
1444 #ifdef DEBUG_ASI
1445 dump_asi("read ", last_addr, asi, size, ret);
1446 #endif
1447 return ret;
1450 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1452 #ifdef DEBUG_ASI
1453 dump_asi("write", addr, asi, size, val);
1454 #endif
1455 if (asi < 0x80)
1456 raise_exception(TT_PRIV_ACT);
1458 helper_check_align(addr, size - 1);
1459 address_mask(env, &addr);
1461 /* Convert to little endian */
1462 switch (asi) {
1463 case 0x88: // Primary LE
1464 case 0x89: // Secondary LE
1465 switch(size) {
1466 case 2:
1467 addr = bswap16(addr);
1468 break;
1469 case 4:
1470 addr = bswap32(addr);
1471 break;
1472 case 8:
1473 addr = bswap64(addr);
1474 break;
1475 default:
1476 break;
1478 default:
1479 break;
1482 switch(asi) {
1483 case 0x80: // Primary
1484 case 0x88: // Primary LE
1486 switch(size) {
1487 case 1:
1488 stb_raw(addr, val);
1489 break;
1490 case 2:
1491 stw_raw(addr, val);
1492 break;
1493 case 4:
1494 stl_raw(addr, val);
1495 break;
1496 case 8:
1497 default:
1498 stq_raw(addr, val);
1499 break;
1502 break;
1503 case 0x81: // Secondary
1504 case 0x89: // Secondary LE
1505 // XXX
1506 return;
1508 case 0x82: // Primary no-fault, RO
1509 case 0x83: // Secondary no-fault, RO
1510 case 0x8a: // Primary no-fault LE, RO
1511 case 0x8b: // Secondary no-fault LE, RO
1512 default:
1513 do_unassigned_access(addr, 1, 0, 1, size);
1514 return;
1518 #else /* CONFIG_USER_ONLY */
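/* sparc64 (softmmu) ASI accesses: ASIs below 0x80 require privileged mode,
 * and on CPUs with the HYPV feature ASIs 0x30-0x7f additionally require
 * hyperprivileged mode. The switch below dispatches to the MMU and TLB
 * registers, bypass (physical) accesses and the normal address spaces; the
 * *_LE ASIs are handled with an extra byte swap. */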
1520 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1522 uint64_t ret = 0;
1523 #if defined(DEBUG_ASI)
1524 target_ulong last_addr = addr;
1525 #endif
1527 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1528 || ((env->def->features & CPU_FEATURE_HYPV)
1529 && asi >= 0x30 && asi < 0x80
1530 && !(env->hpstate & HS_PRIV)))
1531 raise_exception(TT_PRIV_ACT);
1533 helper_check_align(addr, size - 1);
1534 switch (asi) {
1535 case 0x82: // Primary no-fault
1536 case 0x8a: // Primary no-fault LE
1537 if (cpu_get_phys_page_debug(env, addr) == -1ULL) {
1538 #ifdef DEBUG_ASI
1539 dump_asi("read ", last_addr, asi, size, ret);
1540 #endif
1541 return 0;
1543 // Fall through
1544 case 0x10: // As if user primary
1545 case 0x18: // As if user primary LE
1546 case 0x80: // Primary
1547 case 0x88: // Primary LE
1548 case 0xe2: // UA2007 Primary block init
1549 case 0xe3: // UA2007 Secondary block init
1550 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1551 if ((env->def->features & CPU_FEATURE_HYPV)
1552 && env->hpstate & HS_PRIV) {
1553 switch(size) {
1554 case 1:
1555 ret = ldub_hypv(addr);
1556 break;
1557 case 2:
1558 ret = lduw_hypv(addr);
1559 break;
1560 case 4:
1561 ret = ldl_hypv(addr);
1562 break;
1563 default:
1564 case 8:
1565 ret = ldq_hypv(addr);
1566 break;
1568 } else {
1569 switch(size) {
1570 case 1:
1571 ret = ldub_kernel(addr);
1572 break;
1573 case 2:
1574 ret = lduw_kernel(addr);
1575 break;
1576 case 4:
1577 ret = ldl_kernel(addr);
1578 break;
1579 default:
1580 case 8:
1581 ret = ldq_kernel(addr);
1582 break;
1585 } else {
1586 switch(size) {
1587 case 1:
1588 ret = ldub_user(addr);
1589 break;
1590 case 2:
1591 ret = lduw_user(addr);
1592 break;
1593 case 4:
1594 ret = ldl_user(addr);
1595 break;
1596 default:
1597 case 8:
1598 ret = ldq_user(addr);
1599 break;
1602 break;
1603 case 0x14: // Bypass
1604 case 0x15: // Bypass, non-cacheable
1605 case 0x1c: // Bypass LE
1606 case 0x1d: // Bypass, non-cacheable LE
1608 switch(size) {
1609 case 1:
1610 ret = ldub_phys(addr);
1611 break;
1612 case 2:
1613 ret = lduw_phys(addr);
1614 break;
1615 case 4:
1616 ret = ldl_phys(addr);
1617 break;
1618 default:
1619 case 8:
1620 ret = ldq_phys(addr);
1621 break;
1623 break;
1625 case 0x24: // Nucleus quad LDD 128 bit atomic
1626 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
1627 // Only ldda allowed
1628 raise_exception(TT_ILL_INSN);
1629 return 0;
1630 case 0x83: // Secondary no-fault
1631 case 0x8b: // Secondary no-fault LE
1632 if (cpu_get_phys_page_debug(env, addr) == -1ULL) {
1633 #ifdef DEBUG_ASI
1634 dump_asi("read ", last_addr, asi, size, ret);
1635 #endif
1636 return 0;
1638 // Fall through
1639 case 0x04: // Nucleus
1640 case 0x0c: // Nucleus Little Endian (LE)
1641 case 0x11: // As if user secondary
1642 case 0x19: // As if user secondary LE
1643 case 0x4a: // UPA config
1644 case 0x81: // Secondary
1645 case 0x89: // Secondary LE
1646 // XXX
1647 break;
1648 case 0x45: // LSU
1649 ret = env->lsu;
1650 break;
1651 case 0x50: // I-MMU regs
1653 int reg = (addr >> 3) & 0xf;
1655 ret = env->immuregs[reg];
1656 break;
1658 case 0x51: // I-MMU 8k TSB pointer
1659 case 0x52: // I-MMU 64k TSB pointer
1660 // XXX
1661 break;
1662 case 0x55: // I-MMU data access
1664 int reg = (addr >> 3) & 0x3f;
1666 ret = env->itlb_tte[reg];
1667 break;
1669 case 0x56: // I-MMU tag read
1671 int reg = (addr >> 3) & 0x3f;
1673 ret = env->itlb_tag[reg];
1674 break;
1676 case 0x58: // D-MMU regs
1678 int reg = (addr >> 3) & 0xf;
1680 ret = env->dmmuregs[reg];
1681 break;
1683 case 0x5d: // D-MMU data access
1685 int reg = (addr >> 3) & 0x3f;
1687 ret = env->dtlb_tte[reg];
1688 break;
1690 case 0x5e: // D-MMU tag read
1692 int reg = (addr >> 3) & 0x3f;
1694 ret = env->dtlb_tag[reg];
1695 break;
1697 case 0x46: // D-cache data
1698 case 0x47: // D-cache tag access
1699 case 0x4b: // E-cache error enable
1700 case 0x4c: // E-cache asynchronous fault status
1701 case 0x4d: // E-cache asynchronous fault address
1702 case 0x4e: // E-cache tag data
1703 case 0x66: // I-cache instruction access
1704 case 0x67: // I-cache tag access
1705 case 0x6e: // I-cache predecode
1706 case 0x6f: // I-cache LRU etc.
1707 case 0x76: // E-cache tag
1708 case 0x7e: // E-cache tag
1709 break;
1710 case 0x59: // D-MMU 8k TSB pointer
1711 case 0x5a: // D-MMU 64k TSB pointer
1712 case 0x5b: // D-MMU data pointer
1713 case 0x48: // Interrupt dispatch, RO
1714 case 0x49: // Interrupt data receive
1715 case 0x7f: // Incoming interrupt vector, RO
1716 // XXX
1717 break;
1718 case 0x54: // I-MMU data in, WO
1719 case 0x57: // I-MMU demap, WO
1720 case 0x5c: // D-MMU data in, WO
1721 case 0x5f: // D-MMU demap, WO
1722 case 0x77: // Interrupt vector, WO
1723 default:
1724 do_unassigned_access(addr, 0, 0, 1, size);
1725 ret = 0;
1726 break;
1729 /* Convert from little endian */
1730 switch (asi) {
1731 case 0x0c: // Nucleus Little Endian (LE)
1732 case 0x18: // As if user primary LE
1733 case 0x19: // As if user secondary LE
1734 case 0x1c: // Bypass LE
1735 case 0x1d: // Bypass, non-cacheable LE
1736 case 0x88: // Primary LE
1737 case 0x89: // Secondary LE
1738 case 0x8a: // Primary no-fault LE
1739 case 0x8b: // Secondary no-fault LE
1740 switch(size) {
1741 case 2:
1742 ret = bswap16(ret);
1743 break;
1744 case 4:
1745 ret = bswap32(ret);
1746 break;
1747 case 8:
1748 ret = bswap64(ret);
1749 break;
1750 default:
1751 break;
1753 default:
1754 break;
1757 /* Convert to signed number */
1758 if (sign) {
1759 switch(size) {
1760 case 1:
1761 ret = (int8_t) ret;
1762 break;
1763 case 2:
1764 ret = (int16_t) ret;
1765 break;
1766 case 4:
1767 ret = (int32_t) ret;
1768 break;
1769 default:
1770 break;
1773 #ifdef DEBUG_ASI
1774 dump_asi("read ", last_addr, asi, size, ret);
1775 #endif
1776 return ret;
1779 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1781 #ifdef DEBUG_ASI
1782 dump_asi("write", addr, asi, size, val);
1783 #endif
1784 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1785 || ((env->def->features & CPU_FEATURE_HYPV)
1786 && asi >= 0x30 && asi < 0x80
1787 && !(env->hpstate & HS_PRIV)))
1788 raise_exception(TT_PRIV_ACT);
1790 helper_check_align(addr, size - 1);
1791 /* Convert to little endian */
1792 switch (asi) {
1793 case 0x0c: // Nucleus Little Endian (LE)
1794 case 0x18: // As if user primary LE
1795 case 0x19: // As if user secondary LE
1796 case 0x1c: // Bypass LE
1797 case 0x1d: // Bypass, non-cacheable LE
1798 case 0x88: // Primary LE
1799 case 0x89: // Secondary LE
1800 switch(size) {
1801 case 2:
1802 addr = bswap16(addr);
1803 break;
1804 case 4:
1805 addr = bswap32(addr);
1806 break;
1807 case 8:
1808 addr = bswap64(addr);
1809 break;
1810 default:
1811 break;
1813 default:
1814 break;
1817 switch(asi) {
1818 case 0x10: // As if user primary
1819 case 0x18: // As if user primary LE
1820 case 0x80: // Primary
1821 case 0x88: // Primary LE
1822 case 0xe2: // UA2007 Primary block init
1823 case 0xe3: // UA2007 Secondary block init
1824 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1825 if ((env->def->features & CPU_FEATURE_HYPV)
1826 && env->hpstate & HS_PRIV) {
1827 switch(size) {
1828 case 1:
1829 stb_hypv(addr, val);
1830 break;
1831 case 2:
1832 stw_hypv(addr, val);
1833 break;
1834 case 4:
1835 stl_hypv(addr, val);
1836 break;
1837 case 8:
1838 default:
1839 stq_hypv(addr, val);
1840 break;
1842 } else {
1843 switch(size) {
1844 case 1:
1845 stb_kernel(addr, val);
1846 break;
1847 case 2:
1848 stw_kernel(addr, val);
1849 break;
1850 case 4:
1851 stl_kernel(addr, val);
1852 break;
1853 case 8:
1854 default:
1855 stq_kernel(addr, val);
1856 break;
1859 } else {
1860 switch(size) {
1861 case 1:
1862 stb_user(addr, val);
1863 break;
1864 case 2:
1865 stw_user(addr, val);
1866 break;
1867 case 4:
1868 stl_user(addr, val);
1869 break;
1870 case 8:
1871 default:
1872 stq_user(addr, val);
1873 break;
1876 break;
1877 case 0x14: // Bypass
1878 case 0x15: // Bypass, non-cacheable
1879 case 0x1c: // Bypass LE
1880 case 0x1d: // Bypass, non-cacheable LE
1882 switch(size) {
1883 case 1:
1884 stb_phys(addr, val);
1885 break;
1886 case 2:
1887 stw_phys(addr, val);
1888 break;
1889 case 4:
1890 stl_phys(addr, val);
1891 break;
1892 case 8:
1893 default:
1894 stq_phys(addr, val);
1895 break;
1898 return;
1899 case 0x24: // Nucleus quad LDD 128 bit atomic
1900 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
1901 // Only ldda allowed
1902 raise_exception(TT_ILL_INSN);
1903 return;
1904 case 0x04: // Nucleus
1905 case 0x0c: // Nucleus Little Endian (LE)
1906 case 0x11: // As if user secondary
1907 case 0x19: // As if user secondary LE
1908 case 0x4a: // UPA config
1909 case 0x81: // Secondary
1910 case 0x89: // Secondary LE
1911 // XXX
1912 return;
1913 case 0x45: // LSU
1915 uint64_t oldreg;
1917 oldreg = env->lsu;
1918 env->lsu = val & (DMMU_E | IMMU_E);
1919 // Mappings generated during D/I MMU disabled mode are
1920 // invalid in normal mode
1921 if (oldreg != env->lsu) {
1922 DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
1923 oldreg, env->lsu);
1924 #ifdef DEBUG_MMU
1925 dump_mmu(env);
1926 #endif
1927 tlb_flush(env, 1);
1929 return;
1931 case 0x50: // I-MMU regs
1933 int reg = (addr >> 3) & 0xf;
1934 uint64_t oldreg;
1936 oldreg = env->immuregs[reg];
1937 switch(reg) {
1938 case 0: // RO
1939 case 4:
1940 return;
1941 case 1: // Not in I-MMU
1942 case 2:
1943 case 7:
1944 case 8:
1945 return;
1946 case 3: // SFSR
1947 if ((val & 1) == 0)
1948 val = 0; // Clear SFSR
1949 break;
1950 case 5: // TSB access
1951 case 6: // Tag access
1952 default:
1953 break;
1955 env->immuregs[reg] = val;
1956 if (oldreg != env->immuregs[reg]) {
1957 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
1958 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
1960 #ifdef DEBUG_MMU
1961 dump_mmu(env);
1962 #endif
1963 return;
1965 case 0x54: // I-MMU data in
1967 unsigned int i;
1969 // Try finding an invalid entry
1970 for (i = 0; i < 64; i++) {
1971 if ((env->itlb_tte[i] & 0x8000000000000000ULL) == 0) {
1972 env->itlb_tag[i] = env->immuregs[6];
1973 env->itlb_tte[i] = val;
1974 return;
1977 // Try finding an unlocked entry
1978 for (i = 0; i < 64; i++) {
1979 if ((env->itlb_tte[i] & 0x40) == 0) {
1980 env->itlb_tag[i] = env->immuregs[6];
1981 env->itlb_tte[i] = val;
1982 return;
1985 // error state?
1986 return;
1988 case 0x55: // I-MMU data access
1990 // TODO: auto demap
1992 unsigned int i = (addr >> 3) & 0x3f;
1994 env->itlb_tag[i] = env->immuregs[6];
1995 env->itlb_tte[i] = val;
1996 return;
1998 case 0x57: // I-MMU demap
2000 unsigned int i;
2002 for (i = 0; i < 64; i++) {
2003 if ((env->itlb_tte[i] & 0x8000000000000000ULL) != 0) {
2004 target_ulong mask = 0xffffffffffffe000ULL;
2006 mask <<= 3 * ((env->itlb_tte[i] >> 61) & 3);
2007 if ((val & mask) == (env->itlb_tag[i] & mask)) {
2008 env->itlb_tag[i] = 0;
2009 env->itlb_tte[i] = 0;
2011 return;
2015 return;
2016 case 0x58: // D-MMU regs
2018 int reg = (addr >> 3) & 0xf;
2019 uint64_t oldreg;
2021 oldreg = env->dmmuregs[reg];
2022 switch(reg) {
2023 case 0: // RO
2024 case 4:
2025 return;
2026 case 3: // SFSR
2027 if ((val & 1) == 0) {
2028 val = 0; // Clear SFSR, Fault address
2029 env->dmmuregs[4] = 0;
2031 env->dmmuregs[reg] = val;
2032 break;
2033 case 1: // Primary context
2034 case 2: // Secondary context
2035 case 5: // TSB access
2036 case 6: // Tag access
2037 case 7: // Virtual Watchpoint
2038 case 8: // Physical Watchpoint
2039 default:
2040 break;
2042 env->dmmuregs[reg] = val;
2043 if (oldreg != env->dmmuregs[reg]) {
2044 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
2045 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
2047 #ifdef DEBUG_MMU
2048 dump_mmu(env);
2049 #endif
2050 return;
2052 case 0x5c: // D-MMU data in
2054 unsigned int i;
2056 // Try finding an invalid entry
2057 for (i = 0; i < 64; i++) {
2058 if ((env->dtlb_tte[i] & 0x8000000000000000ULL) == 0) {
2059 env->dtlb_tag[i] = env->dmmuregs[6];
2060 env->dtlb_tte[i] = val;
2061 return;
2064 // Try finding an unlocked entry
2065 for (i = 0; i < 64; i++) {
2066 if ((env->dtlb_tte[i] & 0x40) == 0) {
2067 env->dtlb_tag[i] = env->dmmuregs[6];
2068 env->dtlb_tte[i] = val;
2069 return;
2072 // error state?
2073 return;
2075 case 0x5d: // D-MMU data access
2077 unsigned int i = (addr >> 3) & 0x3f;
2079 env->dtlb_tag[i] = env->dmmuregs[6];
2080 env->dtlb_tte[i] = val;
2081 return;
2083 case 0x5f: // D-MMU demap
2085 unsigned int i;
2087 for (i = 0; i < 64; i++) {
2088 if ((env->dtlb_tte[i] & 0x8000000000000000ULL) != 0) {
2089 target_ulong mask = 0xffffffffffffe000ULL;
2091 mask <<= 3 * ((env->dtlb_tte[i] >> 61) & 3);
2092 if ((val & mask) == (env->dtlb_tag[i] & mask)) {
2093 env->dtlb_tag[i] = 0;
2094 env->dtlb_tte[i] = 0;
2096 return;
2100 return;
2101 case 0x49: // Interrupt data receive
2102 // XXX
2103 return;
2104 case 0x46: // D-cache data
2105 case 0x47: // D-cache tag access
2106 case 0x4b: // E-cache error enable
2107 case 0x4c: // E-cache asynchronous fault status
2108 case 0x4d: // E-cache asynchronous fault address
2109 case 0x4e: // E-cache tag data
2110 case 0x66: // I-cache instruction access
2111 case 0x67: // I-cache tag access
2112 case 0x6e: // I-cache predecode
2113 case 0x6f: // I-cache LRU etc.
2114 case 0x76: // E-cache tag
2115 case 0x7e: // E-cache tag
2116 return;
2117 case 0x51: // I-MMU 8k TSB pointer, RO
2118 case 0x52: // I-MMU 64k TSB pointer, RO
2119 case 0x56: // I-MMU tag read, RO
2120 case 0x59: // D-MMU 8k TSB pointer, RO
2121 case 0x5a: // D-MMU 64k TSB pointer, RO
2122 case 0x5b: // D-MMU data pointer, RO
2123 case 0x5e: // D-MMU tag read, RO
2124 case 0x48: // Interrupt dispatch, RO
2125 case 0x7f: // Incoming interrupt vector, RO
2126 case 0x82: // Primary no-fault, RO
2127 case 0x83: // Secondary no-fault, RO
2128 case 0x8a: // Primary no-fault LE, RO
2129 case 0x8b: // Secondary no-fault LE, RO
2130 default:
2131 do_unassigned_access(addr, 1, 0, 1, size);
2132 return;
2135 #endif /* CONFIG_USER_ONLY */
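/* ldda with ASI 0x24/0x2c performs the 128-bit "nucleus quad LDD" access
 * into an even/odd register pair (byte-swapped for the LE variant); any
 * other ASI falls back to two 32-bit helper_ld_asi accesses. */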
2137 void helper_ldda_asi(target_ulong addr, int asi, int rd)
2139 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2140 || ((env->def->features & CPU_FEATURE_HYPV)
2141 && asi >= 0x30 && asi < 0x80
2142 && !(env->hpstate & HS_PRIV)))
2143 raise_exception(TT_PRIV_ACT);
2145 switch (asi) {
2146 case 0x24: // Nucleus quad LDD 128 bit atomic
2147 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
2148 helper_check_align(addr, 0xf);
2149 if (rd == 0) {
2150 env->gregs[1] = ldq_kernel(addr + 8);
2151 if (asi == 0x2c)
2152 bswap64s(&env->gregs[1]);
2153 } else if (rd < 8) {
2154 env->gregs[rd] = ldq_kernel(addr);
2155 env->gregs[rd + 1] = ldq_kernel(addr + 8);
2156 if (asi == 0x2c) {
2157 bswap64s(&env->gregs[rd]);
2158 bswap64s(&env->gregs[rd + 1]);
2160 } else {
2161 env->regwptr[rd] = ldq_kernel(addr);
2162 env->regwptr[rd + 1] = ldq_kernel(addr + 8);
2163 if (asi == 0x2c) {
2164 bswap64s(&env->regwptr[rd]);
2165 bswap64s(&env->regwptr[rd + 1]);
2168 break;
2169 default:
2170 helper_check_align(addr, 0x3);
2171 if (rd == 0)
2172 env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
2173 else if (rd < 8) {
2174 env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
2175 env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
2176 } else {
2177 env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
2178 env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
2180 break;
2184 void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
2186 unsigned int i;
2187 target_ulong val;
2189 helper_check_align(addr, 3);
2190 switch (asi) {
2191 case 0xf0: // Block load primary
2192 case 0xf1: // Block load secondary
2193 case 0xf8: // Block load primary LE
2194 case 0xf9: // Block load secondary LE
2195 if (rd & 7) {
2196 raise_exception(TT_ILL_INSN);
2197 return;
2199 helper_check_align(addr, 0x3f);
2200 for (i = 0; i < 16; i++) {
2201 *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
2202 0);
2203 addr += 4;
2206 return;
2207 default:
2208 break;
2211 val = helper_ld_asi(addr, asi, size, 0);
2212 switch(size) {
2213 default:
2214 case 4:
2215 *((uint32_t *)&env->fpr[rd]) = val;
2216 break;
2217 case 8:
2218 *((int64_t *)&DT0) = val;
2219 break;
2220 case 16:
2221 // XXX
2222 break;
2226 void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
2228 unsigned int i;
2229 target_ulong val = 0;
2231 helper_check_align(addr, 3);
2232 switch (asi) {
2233 case 0xe0: // UA2007 Block commit store primary (cache flush)
2234 case 0xe1: // UA2007 Block commit store secondary (cache flush)
2235 case 0xf0: // Block store primary
2236 case 0xf1: // Block store secondary
2237 case 0xf8: // Block store primary LE
2238 case 0xf9: // Block store secondary LE
2239 if (rd & 7) {
2240 raise_exception(TT_ILL_INSN);
2241 return;
2243 helper_check_align(addr, 0x3f);
2244 for (i = 0; i < 16; i++) {
2245 val = *(uint32_t *)&env->fpr[rd++];
2246 helper_st_asi(addr, val, asi & 0x8f, 4);
2247 addr += 4;
2250 return;
2251 default:
2252 break;
2255 switch(size) {
2256 default:
2257 case 4:
2258 val = *((uint32_t *)&env->fpr[rd]);
2259 break;
2260 case 8:
2261 val = *((int64_t *)&DT0);
2262 break;
2263 case 16:
2264 // XXX
2265 break;
2267 helper_st_asi(addr, val, asi, size);
2270 target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
2271 target_ulong val2, uint32_t asi)
2273 target_ulong ret;
2275 val2 &= 0xffffffffUL;
2276 ret = helper_ld_asi(addr, asi, 4, 0);
2277 ret &= 0xffffffffUL;
2278 if (val2 == ret)
2279 helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4);
2280 return ret;
2283 target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
2284 target_ulong val2, uint32_t asi)
2286 target_ulong ret;
2288 ret = helper_ld_asi(addr, asi, 8, 0);
2289 if (val2 == ret)
2290 helper_st_asi(addr, val1, asi, 8);
2291 return ret;
2293 #endif /* TARGET_SPARC64 */
2295 #ifndef TARGET_SPARC64
2296 void helper_rett(void)
2298 unsigned int cwp;
2300 if (env->psret == 1)
2301 raise_exception(TT_ILL_INSN);
2303 env->psret = 1;
2304 cwp = cpu_cwp_inc(env, env->cwp + 1) ;
2305 if (env->wim & (1 << cwp)) {
2306 raise_exception(TT_WIN_UNF);
2308 set_cwp(cwp);
2309 env->psrs = env->psrps;
2311 #endif
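/* udiv/sdiv divide the 64-bit value formed by Y:rs1 by the 32-bit rs2.
 * On overflow the result saturates and cc_src2 records the overflow so the
 * cc-setting variants can raise the V flag. */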
2313 target_ulong helper_udiv(target_ulong a, target_ulong b)
2315 uint64_t x0;
2316 uint32_t x1;
2318 x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
2319 x1 = b;
2321 if (x1 == 0) {
2322 raise_exception(TT_DIV_ZERO);
2325 x0 = x0 / x1;
2326 if (x0 > 0xffffffff) {
2327 env->cc_src2 = 1;
2328 return 0xffffffff;
2329 } else {
2330 env->cc_src2 = 0;
2331 return x0;
2335 target_ulong helper_sdiv(target_ulong a, target_ulong b)
2337 int64_t x0;
2338 int32_t x1;
2340 x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
2341 x1 = b;
2343 if (x1 == 0) {
2344 raise_exception(TT_DIV_ZERO);
2347 x0 = x0 / x1;
2348 if ((int32_t) x0 != x0) {
2349 env->cc_src2 = 1;
2350 return x0 < 0? 0x80000000: 0x7fffffff;
2351 } else {
2352 env->cc_src2 = 0;
2353 return x0;
2357 void helper_stdf(target_ulong addr, int mem_idx)
2359 helper_check_align(addr, 7);
2360 #if !defined(CONFIG_USER_ONLY)
2361 switch (mem_idx) {
2362 case 0:
2363 stfq_user(addr, DT0);
2364 break;
2365 case 1:
2366 stfq_kernel(addr, DT0);
2367 break;
2368 #ifdef TARGET_SPARC64
2369 case 2:
2370 stfq_hypv(addr, DT0);
2371 break;
2372 #endif
2373 default:
2374 break;
2376 #else
2377 address_mask(env, &addr);
2378 stfq_raw(addr, DT0);
2379 #endif
2382 void helper_lddf(target_ulong addr, int mem_idx)
2384 helper_check_align(addr, 7);
2385 #if !defined(CONFIG_USER_ONLY)
2386 switch (mem_idx) {
2387 case 0:
2388 DT0 = ldfq_user(addr);
2389 break;
2390 case 1:
2391 DT0 = ldfq_kernel(addr);
2392 break;
2393 #ifdef TARGET_SPARC64
2394 case 2:
2395 DT0 = ldfq_hypv(addr);
2396 break;
2397 #endif
2398 default:
2399 break;
2401 #else
2402 address_mask(env, &addr);
2403 DT0 = ldfq_raw(addr);
2404 #endif
2407 void helper_ldqf(target_ulong addr, int mem_idx)
2409 // XXX add 128 bit load
2410 CPU_QuadU u;
2412 helper_check_align(addr, 7);
2413 #if !defined(CONFIG_USER_ONLY)
2414 switch (mem_idx) {
2415 case 0:
2416 u.ll.upper = ldq_user(addr);
2417 u.ll.lower = ldq_user(addr + 8);
2418 QT0 = u.q;
2419 break;
2420 case 1:
2421 u.ll.upper = ldq_kernel(addr);
2422 u.ll.lower = ldq_kernel(addr + 8);
2423 QT0 = u.q;
2424 break;
2425 #ifdef TARGET_SPARC64
2426 case 2:
2427 u.ll.upper = ldq_hypv(addr);
2428 u.ll.lower = ldq_hypv(addr + 8);
2429 QT0 = u.q;
2430 break;
2431 #endif
2432 default:
2433 break;
2435 #else
2436 address_mask(env, &addr);
2437 u.ll.upper = ldq_raw(addr);
2438 u.ll.lower = ldq_raw((addr + 8) & 0xffffffffULL);
2439 QT0 = u.q;
2440 #endif
2443 void helper_stqf(target_ulong addr, int mem_idx)
2445 // XXX add 128 bit store
2446 CPU_QuadU u;
2448 helper_check_align(addr, 7);
2449 #if !defined(CONFIG_USER_ONLY)
2450 switch (mem_idx) {
2451 case 0:
2452 u.q = QT0;
2453 stq_user(addr, u.ll.upper);
2454 stq_user(addr + 8, u.ll.lower);
2455 break;
2456 case 1:
2457 u.q = QT0;
2458 stq_kernel(addr, u.ll.upper);
2459 stq_kernel(addr + 8, u.ll.lower);
2460 break;
2461 #ifdef TARGET_SPARC64
2462 case 2:
2463 u.q = QT0;
2464 stq_hypv(addr, u.ll.upper);
2465 stq_hypv(addr + 8, u.ll.lower);
2466 break;
2467 #endif
2468 default:
2469 break;
2471 #else
2472 u.q = QT0;
2473 address_mask(env, &addr);
2474 stq_raw(addr, u.ll.upper);
2475 stq_raw((addr + 8) & 0xffffffffULL, u.ll.lower);
2476 #endif
2479 static inline void set_fsr(void)
2481 int rnd_mode;
2483 switch (env->fsr & FSR_RD_MASK) {
2484 case FSR_RD_NEAREST:
2485 rnd_mode = float_round_nearest_even;
2486 break;
2487 default:
2488 case FSR_RD_ZERO:
2489 rnd_mode = float_round_to_zero;
2490 break;
2491 case FSR_RD_POS:
2492 rnd_mode = float_round_up;
2493 break;
2494 case FSR_RD_NEG:
2495 rnd_mode = float_round_down;
2496 break;
2498 set_float_rounding_mode(rnd_mode, &env->fp_status);
2501 void helper_ldfsr(uint32_t new_fsr)
2503 env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK);
2504 set_fsr();
2507 #ifdef TARGET_SPARC64
2508 void helper_ldxfsr(uint64_t new_fsr)
2510 env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK);
2511 set_fsr();
2513 #endif
2515 void helper_debug(void)
2517 env->exception_index = EXCP_DEBUG;
2518 cpu_loop_exit();
2521 #ifndef TARGET_SPARC64
2522 /* XXX: use another pointer for %iN registers to avoid slow wrapping
2523 handling ? */
2524 void helper_save(void)
2526 uint32_t cwp;
2528 cwp = cpu_cwp_dec(env, env->cwp - 1);
2529 if (env->wim & (1 << cwp)) {
2530 raise_exception(TT_WIN_OVF);
2532 set_cwp(cwp);
2535 void helper_restore(void)
2537 uint32_t cwp;
2539 cwp = cpu_cwp_inc(env, env->cwp + 1);
2540 if (env->wim & (1 << cwp)) {
2541 raise_exception(TT_WIN_UNF);
2543 set_cwp(cwp);
2546 void helper_wrpsr(target_ulong new_psr)
2548 if ((new_psr & PSR_CWP) >= env->nwindows)
2549 raise_exception(TT_ILL_INSN);
2550 else
2551 PUT_PSR(env, new_psr);
2554 target_ulong helper_rdpsr(void)
2556 return GET_PSR(env);
2559 #else
/* XXX: use another pointer for %iN registers to avoid slow wrapping
   handling ? */
void helper_save(void)
{
    uint32_t cwp;

    cwp = cpu_cwp_dec(env, env->cwp - 1);
    if (env->cansave == 0) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    } else {
        if (env->cleanwin - env->canrestore == 0) {
            // XXX Clean windows without trap
            raise_exception(TT_CLRWIN);
        } else {
            env->cansave--;
            env->canrestore++;
            set_cwp(cwp);
        }
    }
}

void helper_restore(void)
{
    uint32_t cwp;

    cwp = cpu_cwp_inc(env, env->cwp + 1);
    if (env->canrestore == 0) {
        raise_exception(TT_FILL | (env->otherwin != 0 ?
                                   (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                   ((env->wstate & 0x7) << 2)));
    } else {
        env->cansave++;
        env->canrestore--;
        set_cwp(cwp);
    }
}
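
/* FLUSHW has to spill every register window that still holds valid
   contents. With the current window and the overlap window always
   unavailable, cansave == nwindows - 2 means nothing is left to flush;
   anything else raises a spill trap so the handler can write the remaining
   windows out. */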
void helper_flushw(void)
{
    if (env->cansave != env->nwindows - 2) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    }
}

void helper_saved(void)
{
    env->cansave++;
    if (env->otherwin == 0)
        env->canrestore--;
    else
        env->otherwin--;
}

void helper_restored(void)
{
    env->canrestore++;
    if (env->cleanwin < env->nwindows - 1)
        env->cleanwin++;
    if (env->otherwin == 0)
        env->cansave--;
    else
        env->otherwin--;
}

target_ulong helper_rdccr(void)
{
    return GET_CCR(env);
}

void helper_wrccr(target_ulong new_ccr)
{
    PUT_CCR(env, new_ccr);
}

// CWP handling is reversed in V9, but we still use the V8 register
// order.
target_ulong helper_rdcwp(void)
{
    return GET_CWP64(env);
}

void helper_wrcwp(target_ulong new_cwp)
{
    PUT_CWP64(env, new_cwp);
}

// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 63 - (TO), 63 - (FROM))
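
/* GET_FIELD numbers bits with bit 0 as the MSB, GET_FIELD_SP with bit 0 as
   the LSB (as in the manuals). For example, GET_FIELD_SP(x, 11, 12)
   expands to (x >> 11) & 3, while GET_FIELD(x, 0, 3) extracts the four
   most significant bits. */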

target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
{
    return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
        (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
        (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
        (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
        (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
        (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
        (((pixel_addr >> 55) & 1) << 4) |
        (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
        GET_FIELD_SP(pixel_addr, 11, 12);
}
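
/* helper_array8 implements the VIS ARRAY8 address calculation: it shuffles
   the packed x/y/z fixed-point coordinates in pixel_addr into a blocked
   memory offset for 8-bit elements, with cubesize selecting the blocking;
   the field positions are easiest to follow next to the VIS documentation. */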

target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
{
    uint64_t tmp;

    tmp = addr + offset;
    env->gsr &= ~7ULL;
    env->gsr |= tmp & 7ULL;
    return tmp & ~7ULL;
}
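
/* helper_alignaddr records the misalignment of addr + offset (its low
   three bits) in GSR.align and returns the 8-byte-aligned address; a
   subsequent faligndata uses GSR.align to pick the requested bytes out of
   the two aligned doublewords. */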

target_ulong helper_popc(target_ulong val)
{
    return ctpop64(val);
}

static inline uint64_t *get_gregset(uint64_t pstate)
{
    switch (pstate) {
    default:
    case 0:
        return env->bgregs;
    case PS_AG:
        return env->agregs;
    case PS_MG:
        return env->mgregs;
    case PS_IG:
        return env->igregs;
    }
}

static inline void change_pstate(uint64_t new_pstate)
{
    uint64_t pstate_regs, new_pstate_regs;
    uint64_t *src, *dst;

    pstate_regs = env->pstate & 0xc01;
    new_pstate_regs = new_pstate & 0xc01;
    if (new_pstate_regs != pstate_regs) {
        // Switch global register bank
        src = get_gregset(new_pstate_regs);
        dst = get_gregset(pstate_regs);
        memcpy32(dst, env->gregs);
        memcpy32(env->gregs, src);
    }
    env->pstate = new_pstate;
}
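
/* PSTATE.AG, PSTATE.MG and PSTATE.IG (the 0xc01 mask above) select which
   of the four banks of globals (normal, alternate, MMU and interrupt) is
   live in env->gregs; change_pstate() saves the outgoing bank into its
   backing array and loads the incoming one. */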

void helper_wrpstate(target_ulong new_state)
{
    if (!(env->def->features & CPU_FEATURE_GL))
        change_pstate(new_state & 0xf3f);
}

void helper_done(void)
{
    env->pc = env->tsptr->tpc;
    env->npc = env->tsptr->tnpc + 4;
    PUT_CCR(env, env->tsptr->tstate >> 32);
    env->asi = (env->tsptr->tstate >> 24) & 0xff;
    change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
    PUT_CWP64(env, env->tsptr->tstate & 0xff);
    env->tl--;
    env->tsptr = &env->ts[env->tl & MAXTL_MASK];
}

void helper_retry(void)
{
    env->pc = env->tsptr->tpc;
    env->npc = env->tsptr->tnpc;
    PUT_CCR(env, env->tsptr->tstate >> 32);
    env->asi = (env->tsptr->tstate >> 24) & 0xff;
    change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
    PUT_CWP64(env, env->tsptr->tstate & 0xff);
    env->tl--;
    env->tsptr = &env->ts[env->tl & MAXTL_MASK];
}
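
/* helper_done and helper_retry return from a trap: both restore CCR, ASI,
   PSTATE and CWP from the saved tstate and pop one trap level; they differ
   only in the pc/npc pair they install (retry re-executes the trapped
   instruction, done moves past it). */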

void helper_set_softint(uint64_t value)
{
    env->softint |= (uint32_t)value;
}

void helper_clear_softint(uint64_t value)
{
    env->softint &= (uint32_t)~value;
}

void helper_write_softint(uint64_t value)
{
    env->softint = (uint32_t)value;
}
#endif

void helper_flush(target_ulong addr)
{
    addr &= ~7;
    tb_invalidate_page_range(addr, addr + 8);
}

#ifdef TARGET_SPARC64
#ifdef DEBUG_PCALL
static const char * const excp_names[0x80] = {
    [TT_TFAULT] = "Instruction Access Fault",
    [TT_TMISS] = "Instruction Access MMU Miss",
    [TT_CODE_ACCESS] = "Instruction Access Error",
    [TT_ILL_INSN] = "Illegal Instruction",
    [TT_PRIV_INSN] = "Privileged Instruction",
    [TT_NFPU_INSN] = "FPU Disabled",
    [TT_FP_EXCP] = "FPU Exception",
    [TT_TOVF] = "Tag Overflow",
    [TT_CLRWIN] = "Clean Windows",
    [TT_DIV_ZERO] = "Division By Zero",
    [TT_DFAULT] = "Data Access Fault",
    [TT_DMISS] = "Data Access MMU Miss",
    [TT_DATA_ACCESS] = "Data Access Error",
    [TT_DPROT] = "Data Protection Error",
    [TT_UNALIGNED] = "Unaligned Memory Access",
    [TT_PRIV_ACT] = "Privileged Action",
    [TT_EXTINT | 0x1] = "External Interrupt 1",
    [TT_EXTINT | 0x2] = "External Interrupt 2",
    [TT_EXTINT | 0x3] = "External Interrupt 3",
    [TT_EXTINT | 0x4] = "External Interrupt 4",
    [TT_EXTINT | 0x5] = "External Interrupt 5",
    [TT_EXTINT | 0x6] = "External Interrupt 6",
    [TT_EXTINT | 0x7] = "External Interrupt 7",
    [TT_EXTINT | 0x8] = "External Interrupt 8",
    [TT_EXTINT | 0x9] = "External Interrupt 9",
    [TT_EXTINT | 0xa] = "External Interrupt 10",
    [TT_EXTINT | 0xb] = "External Interrupt 11",
    [TT_EXTINT | 0xc] = "External Interrupt 12",
    [TT_EXTINT | 0xd] = "External Interrupt 13",
    [TT_EXTINT | 0xe] = "External Interrupt 14",
    [TT_EXTINT | 0xf] = "External Interrupt 15",
};
#endif
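
/* V9 trap entry: do_interrupt pushes the current state onto the trap stack
   (tstate/tpc/tnpc/tt), raises the trap level, switches global register
   banks, and vectors through the trap table: each table entry is 32 bytes
   (hence intno << 5), and traps taken while already at TL > 0 use the
   upper half of the table at offset 1 << 14. */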
void do_interrupt(CPUState *env)
{
    int intno = env->exception_index;

#ifdef DEBUG_PCALL
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x180)
            name = "Unknown";
        else if (intno >= 0x100)
            name = "Trap Instruction";
        else if (intno >= 0xc0)
            name = "Window Fill";
        else if (intno >= 0x80)
            name = "Window Spill";
        else {
            name = excp_names[intno];
            if (!name)
                name = "Unknown";
        }

        qemu_log("%6d: %s (v=%04x) pc=%016" PRIx64 " npc=%016" PRIx64
                 " SP=%016" PRIx64 "\n",
                 count, name, intno,
                 env->pc,
                 env->npc, env->regwptr[6]);
        log_cpu_state(env, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            qemu_log(" code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));
            }
            qemu_log("\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    if (env->tl >= env->maxtl) {
        cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
                  " Error state", env->exception_index, env->tl, env->maxtl);
        return;
    }
#endif
    if (env->tl < env->maxtl - 1) {
        env->tl++;
    } else {
        env->pstate |= PS_RED;
        if (env->tl < env->maxtl)
            env->tl++;
    }
    env->tsptr = &env->ts[env->tl & MAXTL_MASK];
    env->tsptr->tstate = ((uint64_t)GET_CCR(env) << 32) |
        ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
        GET_CWP64(env);
    env->tsptr->tpc = env->pc;
    env->tsptr->tnpc = env->npc;
    env->tsptr->tt = intno;
    if (!(env->def->features & CPU_FEATURE_GL)) {
        switch (intno) {
        case TT_IVEC:
            change_pstate(PS_PEF | PS_PRIV | PS_IG);
            break;
        case TT_TFAULT:
        case TT_TMISS:
        case TT_DFAULT:
        case TT_DMISS:
        case TT_DPROT:
            change_pstate(PS_PEF | PS_PRIV | PS_MG);
            break;
        default:
            change_pstate(PS_PEF | PS_PRIV | PS_AG);
            break;
        }
    }
    if (intno == TT_CLRWIN)
        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1));
    else if ((intno & 0x1c0) == TT_SPILL)
        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2));
    else if ((intno & 0x1c0) == TT_FILL)
        cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1));
    env->tbr &= ~0x7fffULL;
    env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = 0;
}
#else
#ifdef DEBUG_PCALL
static const char * const excp_names[0x80] = {
    [TT_TFAULT] = "Instruction Access Fault",
    [TT_ILL_INSN] = "Illegal Instruction",
    [TT_PRIV_INSN] = "Privileged Instruction",
    [TT_NFPU_INSN] = "FPU Disabled",
    [TT_WIN_OVF] = "Window Overflow",
    [TT_WIN_UNF] = "Window Underflow",
    [TT_UNALIGNED] = "Unaligned Memory Access",
    [TT_FP_EXCP] = "FPU Exception",
    [TT_DFAULT] = "Data Access Fault",
    [TT_TOVF] = "Tag Overflow",
    [TT_EXTINT | 0x1] = "External Interrupt 1",
    [TT_EXTINT | 0x2] = "External Interrupt 2",
    [TT_EXTINT | 0x3] = "External Interrupt 3",
    [TT_EXTINT | 0x4] = "External Interrupt 4",
    [TT_EXTINT | 0x5] = "External Interrupt 5",
    [TT_EXTINT | 0x6] = "External Interrupt 6",
    [TT_EXTINT | 0x7] = "External Interrupt 7",
    [TT_EXTINT | 0x8] = "External Interrupt 8",
    [TT_EXTINT | 0x9] = "External Interrupt 9",
    [TT_EXTINT | 0xa] = "External Interrupt 10",
    [TT_EXTINT | 0xb] = "External Interrupt 11",
    [TT_EXTINT | 0xc] = "External Interrupt 12",
    [TT_EXTINT | 0xd] = "External Interrupt 13",
    [TT_EXTINT | 0xe] = "External Interrupt 14",
    [TT_EXTINT | 0xf] = "External Interrupt 15",
    [TT_CODE_ACCESS] = "Instruction Access Error",
    [TT_DATA_ACCESS] = "Data Access Error",
    [TT_DIV_ZERO] = "Division By Zero",
    [TT_NCP_INSN] = "Coprocessor Disabled",
};
#endif

void do_interrupt(CPUState *env)
{
    int cwp, intno = env->exception_index;

#ifdef DEBUG_PCALL
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x100)
            name = "Unknown";
        else if (intno >= 0x80)
            name = "Trap Instruction";
        else {
            name = excp_names[intno];
            if (!name)
                name = "Unknown";
        }

        qemu_log("%6d: %s (v=%02x) pc=%08x npc=%08x SP=%08x\n",
                 count, name, intno,
                 env->pc,
                 env->npc, env->regwptr[6]);
        log_cpu_state(env, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            qemu_log(" code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));
            }
            qemu_log("\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    if (env->psret == 0) {
        cpu_abort(env, "Trap 0x%02x while interrupts disabled, Error state",
                  env->exception_index);
        return;
    }
#endif
    env->psret = 0;
    cwp = cpu_cwp_dec(env, env->cwp - 1);
    cpu_set_cwp(env, cwp);
    env->regwptr[9] = env->pc;
    env->regwptr[10] = env->npc;
    env->psrps = env->psrs;
    env->psrs = 1;
    env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = 0;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr);

#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
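
/* Each inclusion of softmmu_template.h instantiates the slow-path
   load/store helpers for one access size (SHIFT 0..3 -> 1, 2, 4 and 8
   bytes) under the _mmu suffix; with ALIGNED_ONLY defined, those helpers
   call do_unaligned_access() above for misaligned guest addresses. */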

/* XXX: make it generic ? */
static void cpu_restore_state2(void *retaddr)
{
    TranslationBlock *tb;
    unsigned long pc;

    if (retaddr) {
        /* now we have a real cpu fault */
        pc = (unsigned long)retaddr;
        tb = tb_find_pc(pc);
        if (tb) {
            /* the PC is inside the translated code. It means that we have
               a virtual CPU fault */
            cpu_restore_state(tb, env, pc, (void *)(long)env->cond);
        }
    }
}

static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr)
{
#ifdef DEBUG_UNALIGNED
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
           "\n", addr, env->pc);
#endif
    cpu_restore_state2(retaddr);
    raise_exception(TT_UNALIGNED);
}

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    int ret;
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        cpu_restore_state2(retaddr);
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif
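
/* On sparc32 (the #ifndef branch below) an unassigned access is recorded
   in the MMU fault registers before possibly trapping: mmuregs[3] is built
   up roughly along the lines of the SPARC Reference MMU fault status
   register (access-type bits, fault-type field, fault-address-valid and
   overflow flags) and mmuregs[4] holds the fault address. */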

#ifndef TARGET_SPARC64
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
{
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
#ifdef DEBUG_UNASSIGNED
    if (is_asi)
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " asi 0x%02x from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, is_asi, env->pc);
    else
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, env->pc);
#endif
    if (env->mmuregs[3]) /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    if (is_asi)
        env->mmuregs[3] |= 1 << 16;
    if (env->psrs)
        env->mmuregs[3] |= 1 << 5;
    if (is_exec)
        env->mmuregs[3] |= 1 << 6;
    if (is_write)
        env->mmuregs[3] |= 1 << 7;
    env->mmuregs[3] |= (5 << 2) | 2;
    env->mmuregs[4] = addr; /* Fault address register */
    if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
        if (is_exec)
            raise_exception(TT_CODE_ACCESS);
        else
            raise_exception(TT_DATA_ACCESS);
    }
    env = saved_env;
}
#else
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
{
#ifdef DEBUG_UNASSIGNED
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
           "\n", addr, env->pc);
    env = saved_env;
#endif
    if (is_exec)
        raise_exception(TT_CODE_ACCESS);
    else
        raise_exception(TT_DATA_ACCESS);
}
#endif

#ifdef TARGET_SPARC64
void helper_tick_set_count(void *opaque, uint64_t count)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_count(opaque, count);
#endif
}

uint64_t helper_tick_get_count(void *opaque)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_tick_get_count(opaque);
#else
    return 0;
#endif
}

void helper_tick_set_limit(void *opaque, uint64_t limit)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_limit(opaque, limit);
#endif
}
#endif