/*
 *  Alpha emulation cpu micro-operations helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "exec.h"
#include "host-utils.h"
#include "softfloat.h"
/*****************************************************************************/
/* Exceptions processing helpers */
void helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}
uint64_t helper_load_pcc (void)
{
    /* XXX: TODO */
    return 0;
}
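/* The FPCR image built below keeps the dynamic rounding mode in bits
   59:58 and the accrued exception flags starting at bit 52, with bit 63
   as the exception summary; the two helpers translate between that
   layout and the softfloat status word. */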
uint64_t helper_load_fpcr (void)
{
    uint64_t ret = 0;
#ifdef CONFIG_SOFTFLOAT
    ret |= env->fp_status.float_exception_flags << 52;
    if (env->fp_status.float_exception_flags)
        ret |= 1ULL << 63;
    env->ipr[IPR_EXC_SUM] &= ~0x3E;
    env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
#endif
    switch (env->fp_status.float_rounding_mode) {
    case float_round_nearest_even:
        ret |= 2ULL << 58;
        break;
    case float_round_down:
        ret |= 1ULL << 58;
        break;
    case float_round_up:
        ret |= 3ULL << 58;
        break;
    case float_round_to_zero:
        break;
    }
    return ret;
}
void helper_store_fpcr (uint64_t val)
{
#ifdef CONFIG_SOFTFLOAT
    set_float_exception_flags((val >> 52) & 0x3F, &FP_STATUS);
#endif
    switch ((val >> 58) & 3) {
    case 0:
        set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
        break;
    case 1:
        set_float_rounding_mode(float_round_down, &FP_STATUS);
        break;
    case 2:
        set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
        break;
    case 3:
        set_float_rounding_mode(float_round_up, &FP_STATUS);
        break;
    }
}
static spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;
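/* RS and RC read the per-CPU interrupt flag and then set or clear it;
   the spinlock keeps the read-modify-write atomic with respect to
   other CPU threads. */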
uint64_t helper_rs(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 1;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}
uint64_t helper_rc(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 0;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}
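/* Overflow detection in the *V helpers uses the standard XOR trick: an
   addition overflows iff both operands have the same sign and the
   result's sign differs; a subtraction overflows iff the operands'
   signs differ and the result's sign differs from the minuend's. */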
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;

    op1 += op2;
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}
uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;

    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}
uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t res;

    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return res;
}
uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint32_t res;

    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return res;
}
uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return (int64_t)((int32_t)res);
}
uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    muls64(&tl, &th, op1, op2);
    /* If th != 0 && th != -1, then we had an overflow: the unsigned
       compare (th + 1) > 1 is false only for th == 0 and th == -1. */
    if (unlikely((th + 1) > 1)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return tl;
}
uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}
uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}
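/* byte_zap clears every byte of op whose corresponding bit is set in
   the 8-bit selector mskb.  For example, byte_zap(x, 0x0F) zeroes the
   low four bytes of x and leaves the high four untouched. */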
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}
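/* The MSKxx/INSxx helpers below all reduce to byte_zap: the byte
   selector is 0x01, 0x03, 0x0F or 0xFF for byte, word, longword and
   quadword data, shifted by the byte offset taken from the low three
   bits of the mask operand.  The *h variants handle the bytes that
   spill into the next quadword, hence the extra >> 8. */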
uint64_t helper_mskbl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x01 << (mask & 7));
}

uint64_t helper_insbl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x01 << (mask & 7)));
}

uint64_t helper_mskwl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x03 << (mask & 7));
}

uint64_t helper_inswl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x03 << (mask & 7)));
}

uint64_t helper_mskll(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x0F << (mask & 7));
}

uint64_t helper_insll(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x0F << (mask & 7)));
}

uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}

uint64_t helper_mskql(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0xFF << (mask & 7));
}

uint64_t helper_insql(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0xFF << (mask & 7)));
}

uint64_t helper_mskwh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x03 << (mask & 7)) >> 8);
}

uint64_t helper_inswh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0x03 << (mask & 7)) >> 8));
}

uint64_t helper_msklh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x0F << (mask & 7)) >> 8);
}

uint64_t helper_inslh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0x0F << (mask & 7)) >> 8));
}

uint64_t helper_mskqh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0xFF << (mask & 7)) >> 8);
}

uint64_t helper_insqh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0xFF << (mask & 7)) >> 8));
}
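/* CMPBGE compares the eight byte lanes of two quadwords, setting result
   bit i when byte i of op1 is (unsigned) >= byte i of op2; it is the
   building block for fast zero-byte scans in string code. */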
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    return res;
}
/*****************************************************************************/
/* Floating point helpers */

/* F floating (VAX) */
static inline uint64_t float32_to_f(float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}
static inline float32 f_to_float32(uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;

    r =  (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;

    return r;
}
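/* The shuffles above and below reflect the PDP-11 heritage of the VAX
   F_floating memory format, whose 16-bit words are stored out of order
   relative to the register image. */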
uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;

    r =  ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000))
        r |= 0x7ll << 59;

    return r;
}
uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}
/* G floating (VAX) */
static inline uint64_t float64_to_g(float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}
static inline float64 g_to_float64(uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;

    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;

    return r;
}
uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;

    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;

    return r;
}
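/* The G_floating memory format swaps the four 16-bit words of the
   quadword; the swap is an involution, which is why helper_g_to_memory
   and helper_memory_to_g share the same body. */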
uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}
/* S floating (single) */
static inline uint64_t float32_to_s(float32 fa)
{
    CPU_FloatU a;
    uint64_t r;

    a.f = fa;

    r = (((uint64_t)(a.l & 0xc0000000)) << 32) | (((uint64_t)(a.l & 0x3fffffff)) << 29);
    if (((a.l & 0x7f800000) != 0x7f800000) && (!(a.l & 0x40000000)))
        r |= 0x7ll << 59;
    return r;
}

static inline float32 s_to_float32(uint64_t a)
{
    CPU_FloatU r;
    r.l = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
    return r.f;
}
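/* S_floating values live in registers expanded to the T_floating
   layout; the 0x7ll << 59 term fills bits 61:59 with the complement of
   the exponent MSB so that ordinary (non-Inf/NaN) 8-bit S exponents map
   onto the 11-bit register exponent field. */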
uint32_t helper_s_to_memory (uint64_t a)
{
    /* Memory format is the same as float32 */
    float32 fa = s_to_float32(a);
    return *(uint32_t*)(&fa);
}

uint64_t helper_memory_to_s (uint32_t a)
{
    /* Memory format is the same as float32 */
    return float32_to_s(*(float32*)(&a));
}
uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}
/* T floating (double) */
static inline float64 t_to_float64(uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

static inline uint64_t float64_to_t(float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}
uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}
/* Sign copy */
uint64_t helper_cpys(uint64_t a, uint64_t b)
{
    return (a & 0x8000000000000000ULL) | (b & ~0x8000000000000000ULL);
}

uint64_t helper_cpysn(uint64_t a, uint64_t b)
{
    return ((~a) & 0x8000000000000000ULL) | (b & ~0x8000000000000000ULL);
}

uint64_t helper_cpyse(uint64_t a, uint64_t b)
{
    return (a & 0xFFF0000000000000ULL) | (b & ~0xFFF0000000000000ULL);
}
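/* The FP compare helpers return 0x4000000000000000 (T_floating +2.0)
   for true and 0 for false, matching the Alpha CMPTxx/CMPGxx result
   convention. */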
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_is_nan(fa) || float64_is_nan(fb))
        return 0x4000000000000000ULL;
    else
        return 0;
}
uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}
uint64_t helper_cmpfeq (uint64_t a)
{
    return !(a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfne (uint64_t a)
{
    return (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpflt (uint64_t a)
{
    return (a & 0x8000000000000000ULL) && (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfle (uint64_t a)
{
    return (a & 0x8000000000000000ULL) || !(a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfgt (uint64_t a)
{
    return !(a & 0x8000000000000000ULL) && (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfge (uint64_t a)
{
    return !(a & 0x8000000000000000ULL) || !(a & 0x7FFFFFFFFFFFFFFFULL);
}
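/* The cmpf* helpers test the register bit pattern directly against
   zero: a value is zero when its magnitude bits are clear (so +0 and -0
   both compare equal to zero), and negative when its sign bit is set
   with a nonzero magnitude. */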
/* Floating point format conversion */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvttq (uint64_t a)
{
    float64 fa = t_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;

    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_cvtlq (uint64_t a)
{
    return (int64_t)((int32_t)((a >> 32) | ((a >> 29) & 0x3FFFFFFF)));
}
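/* A longword held in an FPR occupies bits 63:62 and 58:29 of the
   register (the S_floating register layout); cvtlq reassembles the
   32-bit value from those two fields before sign-extending it, and
   __helper_cvtql below performs the inverse split. */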
static inline uint64_t __helper_cvtql(uint64_t a, int s, int v)
{
    uint64_t r;

    r = ((uint64_t)(a & 0xC0000000)) << 32;
    r |= ((uint64_t)(a & 0x7FFFFFFF)) << 29;

    if (v && (int64_t)((int32_t)r) != (int64_t)r) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO */
    }

    return r;
}

uint64_t helper_cvtql (uint64_t a)
{
    return __helper_cvtql(a, 0, 0);
}

uint64_t helper_cvtqlv (uint64_t a)
{
    return __helper_cvtql(a, 0, 1);
}

uint64_t helper_cvtqlsv (uint64_t a)
{
    return __helper_cvtql(a, 1, 1);
}
/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
void helper_hw_rei (void)
{
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->ipr[IPR_EXC_ADDR] = a & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

uint64_t helper_mfpr (int iprn, uint64_t val)
{
    uint64_t tmp;

    if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
        val = tmp;

    return val;
}

void helper_mtpr (int iprn, uint64_t val)
{
    cpu_alpha_mtpr(env, iprn, val, NULL);
}
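/* set_alt_mode/restore_mode swap the current mode bits (PS<3:2>) with
   the alternate mode held in IPR_ALT_MODE, bracketing HW_LD/HW_ST
   accesses that must be performed in another privilege mode. */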
void helper_set_alt_mode (void)
{
    env->saved_mode = env->ps & 0xC;
    env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
}

void helper_restore_mode (void)
{
    env->ps = (env->ps & ~0xC) | env->saved_mode;
}

#endif
/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

/* XXX: the two following helpers are pure hacks.
 *      Hopefully, we emulate the PALcode, then we should never see
 *      HW_LD / HW_ST instructions.
 */
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 0, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}
uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 1, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}
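/* The raw/kernel/data load and store helpers below forward to the
   corresponding memory access macros; the *_l_raw and *_c_raw pairs
   implement load-locked/store-conditional through env->lock. */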
void helper_ldl_raw(uint64_t t0, uint64_t t1)
{
    ldl_raw(t1, t0);
}

void helper_ldq_raw(uint64_t t0, uint64_t t1)
{
    ldq_raw(t1, t0);
}

void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldl_raw(t1, t0);
}

void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldq_raw(t1, t0);
}

void helper_ldl_kernel(uint64_t t0, uint64_t t1)
{
    ldl_kernel(t1, t0);
}

void helper_ldq_kernel(uint64_t t0, uint64_t t1)
{
    ldq_kernel(t1, t0);
}

void helper_ldl_data(uint64_t t0, uint64_t t1)
{
    ldl_data(t1, t0);
}

void helper_ldq_data(uint64_t t0, uint64_t t1)
{
    ldq_data(t1, t0);
}
void helper_stl_raw(uint64_t t0, uint64_t t1)
{
    stl_raw(t1, t0);
}

void helper_stq_raw(uint64_t t0, uint64_t t1)
{
    stq_raw(t1, t0);
}

uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stl_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stq_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
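/* Each inclusion of softmmu_template.h instantiates the slow-path MMU
   accessors for one access size (SHIFT 0..3 giving 8-, 16-, 32- and
   64-bit loads and stores) under the _mmu suffix. */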
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif