Replace tabs by 8 spaces. No code change, by Herve Poussineau.
[qemu/dscho.git] / target-alpha / op_helper.c
blob746665a429448bb38e9b49dd17f3353adeaf37ab
1 /*
2 * Alpha emulation cpu micro-operations helpers for qemu.
3 *
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include "exec.h"
22 #include "softfloat.h"
24 #include "op_helper.h"
26 #define MEMSUFFIX _raw
27 #include "op_helper_mem.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #define MEMSUFFIX _user
31 #include "op_helper_mem.h"
33 #define MEMSUFFIX _kernel
34 #include "op_helper_mem.h"
36 /* Those are used for supervisor and executive modes */
37 #define MEMSUFFIX _data
38 #include "op_helper_mem.h"
39 #endif
void helper_tb_flush (void)
{
    /* Flush all cached translations (full TLB flush, including global pages). */
    tlb_flush(env, 1);
}
void cpu_dump_EA (target_ulong EA);
void helper_print_mem_EA (target_ulong EA)
{
    /* Debug helper: dump the effective address of a memory access. */
    cpu_dump_EA(EA);
}
52 /*****************************************************************************/
53 /* Exceptions processing helpers */
void helper_excp (uint32_t excp, uint32_t error)
{
    /* Raise exception 'excp' with error code 'error' and longjmp back
       to the main CPU loop.  Does not return. */
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}
void helper_amask (void)
{
    /* AMASK: clear in T0 the architecture-feature bits implemented by
       this CPU model (env->amask).  Pre-21164 models report all bits. */
    switch (env->implver) {
    case IMPLVER_2106x:
        /* EV4, EV45, LCA, LCA45 & EV5 */
        break;
    case IMPLVER_21164:
    case IMPLVER_21264:
    case IMPLVER_21364:
        T0 &= ~env->amask;
        break;
    }
}
void helper_load_pcc (void)
{
    /* XXX: TODO — processor cycle counter not emulated; always reads 0. */
    T0 = 0;
}
void helper_load_implver (void)
{
    /* IMPLVER: report the implementation version of the emulated CPU. */
    T0 = env->implver;
}
86 void helper_load_fpcr (void)
88 T0 = 0;
89 #ifdef CONFIG_SOFTFLOAT
90 T0 |= env->fp_status.float_exception_flags << 52;
91 if (env->fp_status.float_exception_flags)
92 T0 |= 1ULL << 63;
93 env->ipr[IPR_EXC_SUM] &= ~0x3E:
94 env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
95 #endif
96 switch (env->fp_status.float_rounding_mode) {
97 case float_round_nearest_even:
98 T0 |= 2ULL << 58;
99 break;
100 case float_round_down:
101 T0 |= 1ULL << 58;
102 break;
103 case float_round_up:
104 T0 |= 3ULL << 58;
105 break;
106 case float_round_to_zero:
107 break;
void helper_store_fpcr (void)
{
    /* Install the FPCR value held in T0 into the softfloat status. */
#ifdef CONFIG_SOFTFLOAT
    /* Exception flags live in FPCR<57:52>. */
    set_float_exception_flags((T0 >> 52) & 0x3F, &FP_STATUS);
#endif
    /* FPCR<59:58>: dynamic rounding mode. */
    switch ((T0 >> 58) & 3) {
    case 0:
        set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
        break;
    case 1:
        set_float_rounding_mode(float_round_down, &FP_STATUS);
        break;
    case 2:
        set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
        break;
    case 3:
        set_float_rounding_mode(float_round_up, &FP_STATUS);
        break;
    }
}
void helper_load_irf (void)
{
    /* XXX: TODO — interrupt request flags not emulated; reads 0. */
    T0 = 0;
}
void helper_set_irf (void)
{
    /* XXX: TODO — setting interrupt request flags not emulated. */
}
void helper_clear_irf (void)
{
    /* XXX: TODO — clearing interrupt request flags not emulated. */
}
void helper_addqv (void)
{
    /* ADDQ/V: T0 += T1, trapping on signed 64-bit overflow.
       Overflow iff both operands have the same sign and the sum's sign
       differs: ~(a^b) & (a^sum) has bit 63 set. */
    T2 = T0;
    T0 += T1;
    if (unlikely((T2 ^ T1 ^ (-1ULL)) & (T2 ^ T0) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}
void helper_addlv (void)
{
    /* ADDL/V: 32-bit add, trapping on signed 32-bit overflow
       (same sign-bit predicate as addqv, at bit 31). */
    T2 = T0;
    T0 = (uint32_t)(T0 + T1);
    if (unlikely((T2 ^ T1 ^ (-1UL)) & (T2 ^ T0) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}
void helper_subqv (void)
{
    /* SUBQ/V: T0 -= T1, trapping on signed 64-bit overflow.
       Overflow iff the operands have opposite signs and the result's
       sign matches the subtrahend's. */
    T2 = T0;
    T0 -= T1;
    if (unlikely(((~T2) ^ T0 ^ (-1ULL)) & ((~T2) ^ T1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}
void helper_sublv (void)
{
    /* SUBL/V: 32-bit subtract, trapping on signed 32-bit overflow. */
    T2 = T0;
    T0 = (uint32_t)(T0 - T1);
    if (unlikely(((~T2) ^ T0 ^ (-1UL)) & ((~T2) ^ T1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
}
184 void helper_mullv (void)
186 int64_t res = (int64_t)T0 * (int64_t)T1;
188 if (unlikely((int32_t)res != res)) {
189 helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
191 T0 = (int64_t)((int32_t)res);
194 void helper_mulqv ()
196 uint64_t res, tmp0, tmp1;
198 res = (T0 >> 32) * (T1 >> 32);
199 tmp0 = ((T0 & 0xFFFFFFFF) * (T1 >> 32)) +
200 ((T0 >> 32) * (T1 & 0xFFFFFFFF));
201 tmp1 = (T0 & 0xFFFFFFFF) * (T1 & 0xFFFFFFFF);
202 tmp0 += tmp1 >> 32;
203 res += tmp0 >> 32;
204 T0 *= T1;
205 if (unlikely(res != 0)) {
206 helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
210 void helper_umulh (void)
212 uint64_t tmp0, tmp1;
214 tmp0 = ((T0 & 0xFFFFFFFF) * (T1 >> 32)) +
215 ((T0 >> 32) * (T1 & 0xFFFFFFFF));
216 tmp1 = (T0 & 0xFFFFFFFF) * (T1 & 0xFFFFFFFF);
217 tmp0 += tmp1 >> 32;
218 T0 = (T0 >> 32) * (T0 >> 32);
219 T0 += tmp0 >> 32;
222 void helper_ctpop (void)
224 int n;
226 for (n = 0; T0 != 0; n++)
227 T0 = T0 ^ (T0 - 1);
228 T0 = n;
void helper_ctlz (void)
{
    /* CTLZ: count leading zero bits of T0 by binary search; a zero
       input yields 64. */
    uint32_t op32;
    int n;

    n = 0;
    if (!(T0 & 0xFFFFFFFF00000000ULL)) {
        n += 32;
        T0 <<= 32;
    }
    /* Make it easier for 32 bits hosts */
    op32 = T0 >> 32;
    if (!(op32 & 0xFFFF0000UL)) {
        n += 16;
        op32 <<= 16;
    }
    if (!(op32 & 0xFF000000UL)) {
        n += 8;
        op32 <<= 8;
    }
    if (!(op32 & 0xF0000000UL)) {
        n += 4;
        op32 <<= 4;
    }
    if (!(op32 & 0xC0000000UL)) {
        n += 2;
        op32 <<= 2;
    }
    if (!(op32 & 0x80000000UL)) {
        n++;
        op32 <<= 1;
    }
    /* Second identical test: only still clear when the input was all
       zero, bringing the total to 64. */
    if (!(op32 & 0x80000000UL)) {
        n++;
    }
    T0 = n;
}
void helper_cttz (void)
{
    /* CTTZ: count trailing zero bits of T0 by binary search; a zero
       input yields 64. */
    uint32_t op32;
    int n;

    n = 0;
    if (!(T0 & 0x00000000FFFFFFFFULL)) {
        n += 32;
        T0 >>= 32;
    }
    /* Make it easier for 32 bits hosts */
    op32 = T0;
    if (!(op32 & 0x0000FFFFUL)) {
        n += 16;
        op32 >>= 16;
    }
    if (!(op32 & 0x000000FFUL)) {
        n += 8;
        op32 >>= 8;
    }
    if (!(op32 & 0x0000000FUL)) {
        n += 4;
        op32 >>= 4;
    }
    if (!(op32 & 0x00000003UL)) {
        n += 2;
        op32 >>= 2;
    }
    if (!(op32 & 0x00000001UL)) {
        n++;
        op32 >>= 1;
    }
    /* Second identical test: only still clear when the input was all
       zero, bringing the total to 64. */
    if (!(op32 & 0x00000001UL)) {
        n++;
    }
    T0 = n;
}
/* Clear every byte of 'op' whose corresponding bit is set in 'mskb'
 * (bit i selects byte i, little-endian).  Shared by the ZAP/MSK/EXT/INS
 * helpers. */
static inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; i++) {
        if ((mskb >> i) & 1)
            mask |= 0xFFULL << (i * 8);
    }
    return op & ~mask;
}
void helper_mskbl (void)
{
    /* MSKBL: clear byte (T1 & 7) of T0. */
    T0 = byte_zap(T0, 0x01 << (T1 & 7));
}
void helper_extbl (void)
{
    /* EXTBL: extract byte (T1 & 7) of T0 into byte 0; zero the rest. */
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0xFE);
}
void helper_insbl (void)
{
    /* INSBL: move byte 0 of T0 to byte position (T1 & 7); zero the rest. */
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0x01 << (T1 & 7)));
}
void helper_mskwl (void)
{
    /* MSKWL: clear the 2-byte word starting at byte (T1 & 7). */
    T0 = byte_zap(T0, 0x03 << (T1 & 7));
}
void helper_extwl (void)
{
    /* EXTWL: extract the word starting at byte (T1 & 7) into bytes 0-1. */
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0xFC);
}
void helper_inswl (void)
{
    /* INSWL: position the low word of T0 at byte offset (T1 & 7). */
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0x03 << (T1 & 7)));
}
void helper_mskll (void)
{
    /* MSKLL: clear the 4-byte longword starting at byte (T1 & 7). */
    T0 = byte_zap(T0, 0x0F << (T1 & 7));
}
void helper_extll (void)
{
    /* EXTLL: extract the longword starting at byte (T1 & 7) into bytes 0-3. */
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0xF0);
}
void helper_insll (void)
{
    /* INSLL: position the low longword of T0 at byte offset (T1 & 7). */
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0x0F << (T1 & 7)));
}
void helper_zap (void)
{
    /* ZAP: clear the bytes of T0 selected by the low 8 bits of T1. */
    T0 = byte_zap(T0, T1);
}
void helper_zapnot (void)
{
    /* ZAPNOT: keep the bytes of T0 selected by T1, clear the others. */
    T0 = byte_zap(T0, ~T1);
}
void helper_mskql (void)
{
    /* MSKQL: clear the quadword bytes from offset (T1 & 7) upward. */
    T0 = byte_zap(T0, 0xFF << (T1 & 7));
}
void helper_extql (void)
{
    /* EXTQL: shift the quadword down by (T1 & 7) bytes; keep all bytes. */
    T0 >>= (T1 & 7) * 8;
    T0 = byte_zap(T0, 0x00);
}
void helper_insql (void)
{
    /* INSQL: position the quadword at byte offset (T1 & 7). */
    T0 <<= (T1 & 7) * 8;
    T0 = byte_zap(T0, ~(0xFF << (T1 & 7)));
}
void helper_mskwh (void)
{
    /* MSKWH: clear the bytes of the word that spill into the next
       quadword (mask bits shifted down past byte 7). */
    T0 = byte_zap(T0, (0x03 << (T1 & 7)) >> 8);
}
407 void helper_inswh (void)
409 T0 >>= 64 - ((T1 & 7) * 8);
410 T0 = byte_zap(T0, ~((0x03 << (T1 & 7)) >> 8));
413 void helper_extwh (void)
415 T0 <<= 64 - ((T1 & 7) * 8);
416 T0 = byte_zap(T0, ~0x07);
void helper_msklh (void)
{
    /* MSKLH: clear the bytes of the longword that spill into the next
       quadword. */
    T0 = byte_zap(T0, (0x0F << (T1 & 7)) >> 8);
}
424 void helper_inslh (void)
426 T0 >>= 64 - ((T1 & 7) * 8);
427 T0 = byte_zap(T0, ~((0x0F << (T1 & 7)) >> 8));
430 void helper_extlh (void)
432 T0 <<= 64 - ((T1 & 7) * 8);
433 T0 = byte_zap(T0, ~0x0F);
void helper_mskqh (void)
{
    /* MSKQH: clear the bytes of the quadword that spill into the next
       quadword. */
    T0 = byte_zap(T0, (0xFF << (T1 & 7)) >> 8);
}
441 void helper_insqh (void)
443 T0 >>= 64 - ((T1 & 7) * 8);
444 T0 = byte_zap(T0, ~((0xFF << (T1 & 7)) >> 8));
447 void helper_extqh (void)
449 T0 <<= 64 - ((T1 & 7) * 8);
450 T0 = byte_zap(T0, 0x00);
453 void helper_cmpbge (void)
455 uint8_t opa, opb, res;
456 int i;
458 res = 0;
459 for (i = 0; i < 7; i++) {
460 opa = T0 >> (i * 8);
461 opb = T1 >> (i * 8);
462 if (opa >= opb)
463 res |= 1 << i;
465 T0 = res;
void helper_cmov_fir (int freg)
{
    /* Conditionally move FT1 into FP register 'freg' when FT0 is non-zero.
       NOTE(review): this is a floating-point '!=' compare, so -0.0 also
       counts as zero — confirm that matches the intended FCMOV semantics. */
    if (FT0 != 0)
        env->fir[freg] = FT1;
}
void helper_sqrts (void)
{
    /* SQRTS: IEEE single-precision square root of FT0. */
    FT0 = float32_sqrt(FT0, &FP_STATUS);
}
479 void helper_cpys (void)
481 union {
482 double d;
483 uint64_t i;
484 } p, q, r;
486 p.d = FT0;
487 q.d = FT1;
488 r.i = p.i & 0x8000000000000000ULL;
489 r.i |= q.i & ~0x8000000000000000ULL;
490 FT0 = r.d;
493 void helper_cpysn (void)
495 union {
496 double d;
497 uint64_t i;
498 } p, q, r;
500 p.d = FT0;
501 q.d = FT1;
502 r.i = (~p.i) & 0x8000000000000000ULL;
503 r.i |= q.i & ~0x8000000000000000ULL;
504 FT0 = r.d;
507 void helper_cpyse (void)
509 union {
510 double d;
511 uint64_t i;
512 } p, q, r;
514 p.d = FT0;
515 q.d = FT1;
516 r.i = p.i & 0xFFF0000000000000ULL;
517 r.i |= q.i & ~0xFFF0000000000000ULL;
518 FT0 = r.d;
521 void helper_itofs (void)
523 union {
524 double d;
525 uint64_t i;
526 } p;
528 p.d = FT0;
529 FT0 = int64_to_float32(p.i, &FP_STATUS);
532 void helper_ftois (void)
534 union {
535 double d;
536 uint64_t i;
537 } p;
539 p.i = float32_to_int64(FT0, &FP_STATUS);
540 FT0 = p.d;
void helper_sqrtt (void)
{
    /* SQRTT: IEEE double-precision square root of FT0. */
    FT0 = float64_sqrt(FT0, &FP_STATUS);
}
548 void helper_cmptun (void)
550 union {
551 double d;
552 uint64_t i;
553 } p;
555 p.i = 0;
556 if (float64_is_nan(FT0) || float64_is_nan(FT1))
557 p.i = 0x4000000000000000ULL;
558 FT0 = p.d;
561 void helper_cmpteq (void)
563 union {
564 double d;
565 uint64_t i;
566 } p;
568 p.i = 0;
569 if (float64_eq(FT0, FT1, &FP_STATUS))
570 p.i = 0x4000000000000000ULL;
571 FT0 = p.d;
574 void helper_cmptle (void)
576 union {
577 double d;
578 uint64_t i;
579 } p;
581 p.i = 0;
582 if (float64_le(FT0, FT1, &FP_STATUS))
583 p.i = 0x4000000000000000ULL;
584 FT0 = p.d;
587 void helper_cmptlt (void)
589 union {
590 double d;
591 uint64_t i;
592 } p;
594 p.i = 0;
595 if (float64_lt(FT0, FT1, &FP_STATUS))
596 p.i = 0x4000000000000000ULL;
597 FT0 = p.d;
600 void helper_itoft (void)
602 union {
603 double d;
604 uint64_t i;
605 } p;
607 p.d = FT0;
608 FT0 = int64_to_float64(p.i, &FP_STATUS);
611 void helper_ftoit (void)
613 union {
614 double d;
615 uint64_t i;
616 } p;
618 p.i = float64_to_int64(FT0, &FP_STATUS);
619 FT0 = p.d;
/* A VAX F-float bit pattern with a zero exponent is only valid as true
 * zero: a set sign bit (dirty zero) or non-zero fraction (reserved
 * operand) must trap.  Returns 1 when 'ff' is a valid operand. */
static int vaxf_is_valid (float ff)
{
    union { float f; uint32_t i; } u;
    uint32_t biased_exp, fraction;

    u.f = ff;
    biased_exp = (u.i >> 23) & 0xFF;
    fraction = u.i & 0x007FFFFF;
    if (biased_exp == 0 && ((u.i & 0x80000000) || fraction != 0))
        return 0;
    return 1;
}
/* Rescale a VAX F-float (already reinterpreted as IEEE) into a true
 * IEEE single: the VAX exponent bias differs by 2, so divide by 4;
 * exponents too small to survive the rebias underflow to zero. */
static float vaxf_to_ieee32 (float ff)
{
    union { float f; uint32_t i; } u;
    uint32_t biased_exp;

    u.f = ff;
    biased_exp = (u.i >> 23) & 0xFF;
    if (biased_exp < 3)
        u.f = 0.0;
    else
        u.f *= 0.25;
    return u.f;
}
/* Rescale an IEEE single into VAX F-float range (multiply by 4 for the
 * bias difference).  NaN/infinity and rebias overflow have no VAX
 * representation and collapse to a dirty-zero bit pattern (0x00000001);
 * denormals are renormalized by doubling. */
static float ieee32_to_vaxf (float fi)
{
    union { float f; uint32_t i; } u;
    uint32_t biased_exp, fraction;

    u.f = fi;
    biased_exp = (u.i >> 23) & 0xFF;
    fraction = u.i & 0x007FFFFF;
    if (biased_exp == 255) {
        u.i = 1;                /* NaN or infinity */
    } else if (biased_exp == 0) {
        if (fraction == 0) {
            u.i = 0;            /* true zero */
        } else {
            u.f *= 2.0;         /* denormalized */
        }
    } else {
        if (biased_exp >= 253) {
            u.i = 1;            /* overflow after rebias */
        } else {
            u.f *= 4.0;
        }
    }
    return u.f;
}
695 void helper_addf (void)
697 float ft0, ft1, ft2;
699 if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
700 /* XXX: TODO */
702 ft0 = vaxf_to_ieee32(FT0);
703 ft1 = vaxf_to_ieee32(FT1);
704 ft2 = float32_add(ft0, ft1, &FP_STATUS);
705 FT0 = ieee32_to_vaxf(ft2);
708 void helper_subf (void)
710 float ft0, ft1, ft2;
712 if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
713 /* XXX: TODO */
715 ft0 = vaxf_to_ieee32(FT0);
716 ft1 = vaxf_to_ieee32(FT1);
717 ft2 = float32_sub(ft0, ft1, &FP_STATUS);
718 FT0 = ieee32_to_vaxf(ft2);
721 void helper_mulf (void)
723 float ft0, ft1, ft2;
725 if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
726 /* XXX: TODO */
728 ft0 = vaxf_to_ieee32(FT0);
729 ft1 = vaxf_to_ieee32(FT1);
730 ft2 = float32_mul(ft0, ft1, &FP_STATUS);
731 FT0 = ieee32_to_vaxf(ft2);
734 void helper_divf (void)
736 float ft0, ft1, ft2;
738 if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
739 /* XXX: TODO */
741 ft0 = vaxf_to_ieee32(FT0);
742 ft1 = vaxf_to_ieee32(FT1);
743 ft2 = float32_div(ft0, ft1, &FP_STATUS);
744 FT0 = ieee32_to_vaxf(ft2);
747 void helper_sqrtf (void)
749 float ft0, ft1;
751 if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
752 /* XXX: TODO */
754 ft0 = vaxf_to_ieee32(FT0);
755 ft1 = float32_sqrt(ft0, &FP_STATUS);
756 FT0 = ieee32_to_vaxf(ft1);
void helper_itoff (void)
{
    /* XXX: TODO — ITOFF (integer to F-float register move) unimplemented. */
}
/* A VAX G-float bit pattern with a zero exponent is only valid as true
 * zero: a set sign bit (dirty zero) or non-zero fraction (reserved
 * operand) must trap.  Returns 1 when 'ff' is a valid operand. */
static int vaxg_is_valid (double ff)
{
    union { double f; uint64_t i; } u;
    uint64_t biased_exp, fraction;

    u.f = ff;
    biased_exp = (u.i >> 52) & 0x7FF;
    fraction = u.i & 0x000FFFFFFFFFFFFFULL;
    if (biased_exp == 0 && ((u.i & 0x8000000000000000ULL) || fraction != 0))
        return 0;
    return 1;
}
/* Rescale a VAX G-float (already reinterpreted as IEEE) into a true
 * IEEE double: the bias differs by 2, so divide by 4; exponents too
 * small to survive the rebias underflow to zero. */
static double vaxg_to_ieee64 (double fg)
{
    union { double f; uint64_t i; } u;
    uint32_t biased_exp;

    u.f = fg;
    biased_exp = (u.i >> 52) & 0x7FF;
    if (biased_exp < 3)
        u.f = 0.0;
    else
        u.f *= 0.25;
    return u.f;
}
/* Rescale an IEEE double into VAX G-float range (multiply by 4 for the
 * bias difference).  NaN/infinity and rebias overflow collapse to a
 * VAX dirty-zero bit pattern; denormals are renormalized by doubling. */
static double ieee64_to_vaxg (double fi)
{
    union { double f; uint64_t i; } p;
    uint64_t mant;
    uint32_t exp;

    p.f = fi;
    exp = (p.i >> 52) & 0x7FF;
    mant = p.i & 0x000FFFFFFFFFFFFFULL;
    /* BUG FIX: the NaN/infinity test compared against 255, the *single*
       precision maximum exponent; for doubles the all-ones exponent is
       2047, so NaN/Inf fell into the multiply path while ordinary values
       with exponent 255 were wrongly turned into dirty zeros. */
    if (exp == 2047) {
        /* NaN or infinity */
        p.i = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            p.i = 0;
        } else {
            /* Denormalized */
            p.f *= 2.0;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            p.i = 1; /* VAX dirty zero */
        } else {
            p.f *= 4.0;
        }
    }
    return p.f;
}
838 void helper_addg (void)
840 double ft0, ft1, ft2;
842 if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
843 /* XXX: TODO */
845 ft0 = vaxg_to_ieee64(FT0);
846 ft1 = vaxg_to_ieee64(FT1);
847 ft2 = float64_add(ft0, ft1, &FP_STATUS);
848 FT0 = ieee64_to_vaxg(ft2);
851 void helper_subg (void)
853 double ft0, ft1, ft2;
855 if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
856 /* XXX: TODO */
858 ft0 = vaxg_to_ieee64(FT0);
859 ft1 = vaxg_to_ieee64(FT1);
860 ft2 = float64_sub(ft0, ft1, &FP_STATUS);
861 FT0 = ieee64_to_vaxg(ft2);
864 void helper_mulg (void)
866 double ft0, ft1, ft2;
868 if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
869 /* XXX: TODO */
871 ft0 = vaxg_to_ieee64(FT0);
872 ft1 = vaxg_to_ieee64(FT1);
873 ft2 = float64_mul(ft0, ft1, &FP_STATUS);
874 FT0 = ieee64_to_vaxg(ft2);
877 void helper_divg (void)
879 double ft0, ft1, ft2;
881 if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
882 /* XXX: TODO */
884 ft0 = vaxg_to_ieee64(FT0);
885 ft1 = vaxg_to_ieee64(FT1);
886 ft2 = float64_div(ft0, ft1, &FP_STATUS);
887 FT0 = ieee64_to_vaxg(ft2);
890 void helper_sqrtg (void)
892 double ft0, ft1;
894 if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
895 /* XXX: TODO */
897 ft0 = vaxg_to_ieee64(FT0);
898 ft1 = float64_sqrt(ft0, &FP_STATUS);
899 FT0 = ieee64_to_vaxg(ft1);
902 void helper_cmpgeq (void)
904 union {
905 double d;
906 uint64_t u;
907 } p;
908 double ft0, ft1;
910 if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
911 /* XXX: TODO */
913 ft0 = vaxg_to_ieee64(FT0);
914 ft1 = vaxg_to_ieee64(FT1);
915 p.u = 0;
916 if (float64_eq(ft0, ft1, &FP_STATUS))
917 p.u = 0x4000000000000000ULL;
918 FT0 = p.d;
921 void helper_cmpglt (void)
923 union {
924 double d;
925 uint64_t u;
926 } p;
927 double ft0, ft1;
929 if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
930 /* XXX: TODO */
932 ft0 = vaxg_to_ieee64(FT0);
933 ft1 = vaxg_to_ieee64(FT1);
934 p.u = 0;
935 if (float64_lt(ft0, ft1, &FP_STATUS))
936 p.u = 0x4000000000000000ULL;
937 FT0 = p.d;
940 void helper_cmpgle (void)
942 union {
943 double d;
944 uint64_t u;
945 } p;
946 double ft0, ft1;
948 if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
949 /* XXX: TODO */
951 ft0 = vaxg_to_ieee64(FT0);
952 ft1 = vaxg_to_ieee64(FT1);
953 p.u = 0;
954 if (float64_le(ft0, ft1, &FP_STATUS))
955 p.u = 0x4000000000000000ULL;
956 FT0 = p.d;
959 void helper_cvtqs (void)
961 union {
962 double d;
963 uint64_t u;
964 } p;
966 p.d = FT0;
967 FT0 = (float)p.u;
970 void helper_cvttq (void)
972 union {
973 double d;
974 uint64_t u;
975 } p;
977 p.u = FT0;
978 FT0 = p.d;
981 void helper_cvtqt (void)
983 union {
984 double d;
985 uint64_t u;
986 } p;
988 p.d = FT0;
989 FT0 = p.u;
992 void helper_cvtqf (void)
994 union {
995 double d;
996 uint64_t u;
997 } p;
999 p.d = FT0;
1000 FT0 = ieee32_to_vaxf(p.u);
void helper_cvtgf (void)
{
    /* CVTGF: VAX G-float -> VAX F-float, via the IEEE intermediate form. */
    double ft0;

    ft0 = vaxg_to_ieee64(FT0);
    FT0 = ieee32_to_vaxf(ft0);
}
void helper_cvtgd (void)
{
    /* XXX: TODO — CVTGD (G-float to D-float) unimplemented. */
}
1016 void helper_cvtgq (void)
1018 union {
1019 double d;
1020 uint64_t u;
1021 } p;
1023 p.u = vaxg_to_ieee64(FT0);
1024 FT0 = p.d;
1027 void helper_cvtqg (void)
1029 union {
1030 double d;
1031 uint64_t u;
1032 } p;
1034 p.d = FT0;
1035 FT0 = ieee64_to_vaxg(p.u);
void helper_cvtdg (void)
{
    /* XXX: TODO — CVTDG (D-float to G-float) unimplemented. */
}
1043 void helper_cvtlq (void)
1045 union {
1046 double d;
1047 uint64_t u;
1048 } p, q;
1050 p.d = FT0;
1051 q.u = (p.u >> 29) & 0x3FFFFFFF;
1052 q.u |= (p.u >> 32);
1053 q.u = (int64_t)((int32_t)q.u);
1054 FT0 = q.d;
/* Common code for CVTQL and its /V (integer overflow) and /SV (software
 * completion) variants: scatter the low longword of the quadword in FT0
 * into FP-register longword layout (<31:30> -> <63:62>, <29:0> -> <58:29>).
 * NOTE(review): the result is written *before* the overflow check, and
 * the double-underscore name is reserved to the implementation — kept
 * as-is because the three wrappers below reference it. */
static inline void __helper_cvtql (int s, int v)
{
    union { double d; uint64_t u; } p, q;

    p.d = FT0;
    q.u = ((uint64_t)(p.u & 0xC0000000)) << 32;
    q.u |= ((uint64_t)(p.u & 0x7FFFFFFF)) << 29;
    FT0 = q.d;
    /* /V: trap if the quadword does not fit in a signed longword. */
    if (v && (int64_t)((int32_t)p.u) != (int64_t)p.u) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO — software completion not implemented. */
    }
}
void helper_cvtql (void)
{
    /* CVTQL: no overflow trap, no software completion. */
    __helper_cvtql(0, 0);
}
void helper_cvtqlv (void)
{
    /* CVTQL/V: trap on integer overflow. */
    __helper_cvtql(0, 1);
}
void helper_cvtqlsv (void)
{
    /* CVTQL/SV: software completion + trap on integer overflow. */
    __helper_cvtql(1, 1);
}
1091 void helper_cmpfeq (void)
1093 if (float64_eq(FT0, FT1, &FP_STATUS))
1094 T0 = 1;
1095 else
1096 T0 = 0;
1099 void helper_cmpfne (void)
1101 if (float64_eq(FT0, FT1, &FP_STATUS))
1102 T0 = 0;
1103 else
1104 T0 = 1;
1107 void helper_cmpflt (void)
1109 if (float64_lt(FT0, FT1, &FP_STATUS))
1110 T0 = 1;
1111 else
1112 T0 = 0;
1115 void helper_cmpfle (void)
1117 if (float64_lt(FT0, FT1, &FP_STATUS))
1118 T0 = 1;
1119 else
1120 T0 = 0;
1123 void helper_cmpfgt (void)
1125 if (float64_le(FT0, FT1, &FP_STATUS))
1126 T0 = 0;
1127 else
1128 T0 = 1;
1131 void helper_cmpfge (void)
1133 if (float64_lt(FT0, FT1, &FP_STATUS))
1134 T0 = 0;
1135 else
1136 T0 = 1;
1139 #if !defined (CONFIG_USER_ONLY)
void helper_mfpr (int iprn)
{
    /* Move internal processor register 'iprn' into T0.  On failure
       (cpu_alpha_mfpr returns non-zero) T0 is left unchanged. */
    uint64_t val;

    if (cpu_alpha_mfpr(env, iprn, &val) == 0)
        T0 = val;
}
void helper_mtpr (int iprn)
{
    /* Move T0 into internal processor register 'iprn'; the old value
       is discarded (NULL out-pointer). */
    cpu_alpha_mtpr(env, iprn, T0, NULL);
}
1152 #endif
1154 /*****************************************************************************/
1155 /* Softmmu support */
1156 #if !defined (CONFIG_USER_ONLY)
1158 #define GETPC() (__builtin_return_address(0))
1160 /* XXX: the two following helpers are pure hacks.
1161 * Hopefully, we emulate the PALcode, then we should never see
1162 * HW_LD / HW_ST instructions.
void helper_ld_phys_to_virt (void)
{
    /* Translate the load address in T0 through the softmmu TLB (read
       access), filling the TLB on a miss, and leave the host physical
       address back in T0.  Pure hack for HW_LD — see comment above. */
    uint64_t tlb_addr, physaddr;
    int index, is_user;
    void *retaddr;

    /* Current privilege level from PS<4:3> selects the TLB set. */
    is_user = (env->ps >> 3) & 3;
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[is_user][index].addr_read;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[is_user][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 0, is_user, retaddr);
        goto redo;
    }
    T0 = physaddr;
}
void helper_st_phys_to_virt (void)
{
    /* Same as helper_ld_phys_to_virt but for a write access (addr_write,
       tlb_fill with is_write = 1).  Pure hack for HW_ST. */
    uint64_t tlb_addr, physaddr;
    int index, is_user;
    void *retaddr;

    is_user = (env->ps >> 3) & 3;
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[is_user][index].addr_write;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[is_user][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(T0, 1, is_user, retaddr);
        goto redo;
    }
    T0 = physaddr;
}
1208 #define MMUSUFFIX _mmu
1210 #define SHIFT 0
1211 #include "softmmu_template.h"
1213 #define SHIFT 1
1214 #include "softmmu_template.h"
1216 #define SHIFT 2
1217 #include "softmmu_template.h"
1219 #define SHIFT 3
1220 #include "softmmu_template.h"
1222 /* try to fill the TLB and return an exception if error. If retaddr is
1223 NULL, it means that the function was called in C code (i.e. not
1224 from generated code or from helper.c) */
1225 /* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    /* Handle a softmmu TLB miss: ask the MMU to map 'addr'.  On failure,
       restore the guest CPU state from 'retaddr' (when called from
       translated code) and raise the already-pending exception. */
    TranslationBlock *tb;
    CPUState *saved_env;
    target_phys_addr_t pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (target_phys_addr_t)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}
1255 #endif