/*
 * Alpha emulation cpu micro-operations helpers for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */

#include "exec.h"
#include "host-utils.h"
#include "softfloat.h"
#include "helper.h"

void helper_tb_flush (void)
{
    tb_flush(env);
}

/*****************************************************************************/
/* Exceptions processing helpers */
void helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}

uint64_t helper_load_pcc (void)
{
    /* XXX: TODO */
    return 0;
}

uint64_t helper_load_fpcr (void)
{
    uint64_t ret = 0;
#ifdef CONFIG_SOFTFLOAT
    ret |= env->fp_status.float_exception_flags << 52;
    if (env->fp_status.float_exception_flags)
        ret |= 1ULL << 63;
    env->ipr[IPR_EXC_SUM] &= ~0x3E;
    env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
#endif
    switch (env->fp_status.float_rounding_mode) {
    case float_round_nearest_even:
        ret |= 2ULL << 58;
        break;
    case float_round_down:
        ret |= 1ULL << 58;
        break;
    case float_round_up:
        ret |= 3ULL << 58;
        break;
    case float_round_to_zero:
        break;
    }
    return ret;
}

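/* Note: FPCR bits <59:58> hold the dynamic rounding mode (0 = chopped,
   1 = minus infinity, 2 = normal/nearest, 3 = plus infinity); the load
   and store helpers translate between that encoding and the softfloat
   rounding modes. */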
void helper_store_fpcr (uint64_t val)
{
#ifdef CONFIG_SOFTFLOAT
    set_float_exception_flags((val >> 52) & 0x3F, &FP_STATUS);
#endif
    switch ((val >> 58) & 3) {
    case 0:
        set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
        break;
    case 1:
        set_float_rounding_mode(float_round_down, &FP_STATUS);
        break;
    case 2:
        set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
        break;
    case 3:
        set_float_rounding_mode(float_round_up, &FP_STATUS);
        break;
    }
}

spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;

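/* helper_rs/helper_rc back the RS and RC instructions: they return the
   current per-CPU intr_flag and then set (RS) or clear (RC) it.  The
   spinlock keeps the read-modify-write atomic with respect to other
   emulated CPUs sharing the flag. */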
uint64_t helper_rs(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 1;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}

uint64_t helper_rc(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 0;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}

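/* The *V helpers below raise EXCP_ARITH on signed overflow.  The check
   "(a ^ b ^ -1) & (a ^ result) & sign_bit" is the usual idiom: overflow
   can only happen when both operands have the same sign and the result's
   sign differs from them. */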
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 += op2;
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return res;
}

uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint32_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return res;
}

uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return (int64_t)((int32_t)res);
}

uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    muls64(&tl, &th, op1, op2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (unlikely((th + 1) > 1)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return tl;
}

uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}

uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}

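/* byte_zap() clears byte i of op for every bit i set in the 8-bit
   selector, e.g. byte_zap(x, 0x01) clears the least significant byte.
   All of the MSKx/INSx and ZAP/ZAPNOT helpers below are built on it. */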
static always_inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}

uint64_t helper_mskbl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x01 << (mask & 7));
}

uint64_t helper_insbl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x01 << (mask & 7)));
}

uint64_t helper_mskwl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x03 << (mask & 7));
}

uint64_t helper_inswl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x03 << (mask & 7)));
}

uint64_t helper_mskll(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x0F << (mask & 7));
}

uint64_t helper_insll(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x0F << (mask & 7)));
}

uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}

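/* Example: helper_zapnot(x, 0x0f) keeps bytes 0-3 and clears bytes 4-7,
   i.e. it zero-extends the low longword of x. */
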
uint64_t helper_mskql(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0xFF << (mask & 7));
}

uint64_t helper_insql(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0xFF << (mask & 7)));
}

uint64_t helper_mskwh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x03 << (mask & 7)) >> 8);
}

uint64_t helper_inswh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0x03 << (mask & 7)) >> 8));
}

uint64_t helper_msklh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x0F << (mask & 7)) >> 8);
}

uint64_t helper_inslh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0x0F << (mask & 7)) >> 8));
}

uint64_t helper_mskqh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0xFF << (mask & 7)) >> 8);
}

uint64_t helper_insqh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0xFF << (mask & 7)) >> 8));
}

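/* CMPBGE compares the eight bytes of op1 and op2 as unsigned values and
   sets bit i of the result when byte i of op1 >= byte i of op2.  For
   instance, helper_cmpbge(0, x) has bit i set exactly when byte i of x
   is zero, the classic way to scan for a NUL terminator. */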
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    return res;
}

/* Floating point helpers */

/* F floating (VAX) */
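/* VAX F-floating has no encodings for NaN or infinity and uses a
   different exponent bias than IEEE single precision, so the conversions
   below re-bias the exponent and map unrepresentable values to 1, a
   reserved "dirty zero" bit pattern that faults when used as a source
   operand (see the EXCP_OPCDEC case in f_to_float32). */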
static always_inline uint64_t float32_to_f (float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static always_inline float32 f_to_float32 (uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}

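/* VAX floating-point values are stored in memory with their 16-bit words
   in a different order than the register layout; helper_f_to_memory and
   helper_memory_to_f shuffle between the two forms. */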
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;
    r = (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;
    r = ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000))
        r |= 0x7ll << 59;
    return r;
}

uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}

/* G floating (VAX) */
static always_inline uint64_t float64_to_g (float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static always_inline float64 g_to_float64 (uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}

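/* The G-floating memory format simply reverses the order of the four
   16-bit words, so helper_g_to_memory and helper_memory_to_g apply the
   same permutation (it is its own inverse). */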
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;
    r = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;
    r = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}

/* S floating (single) */
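/* S-floating values are kept in the 64-bit register in a T-floating-like
   form: sign and exponent MSB go to bits <63:62>, the rest of the value
   to bits <58:29>, and float32_to_s fills bits <61:59> when the exponent's
   top bit is clear so that the magnitude stays in range. */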
static always_inline uint64_t float32_to_s (float32 fa)
{
    CPU_FloatU a;
    uint64_t r;

    a.f = fa;

    r = (((uint64_t)(a.l & 0xc0000000)) << 32) | (((uint64_t)(a.l & 0x3fffffff)) << 29);
    if (((a.l & 0x7f800000) != 0x7f800000) && (!(a.l & 0x40000000)))
        r |= 0x7ll << 59;
    return r;
}

static always_inline float32 s_to_float32 (uint64_t a)
{
    CPU_FloatU r;
    r.l = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
    return r.f;
}

uint32_t helper_s_to_memory (uint64_t a)
{
    /* Memory format is the same as float32 */
    float32 fa = s_to_float32(a);
    return *(uint32_t*)(&fa);
}

uint64_t helper_memory_to_s (uint32_t a)
{
    /* Memory format is the same as float32 */
    return float32_to_s(*(float32*)(&a));
}

uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}

/* T floating (double) */
static always_inline float64 t_to_float64 (uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

static always_inline uint64_t float64_to_t (float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}

uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}

/* Sign copy */
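/* CPYS copies the sign of a onto b, CPYSN copies the complemented sign,
   and CPYSE copies sign plus exponent (the top 12 bits); on Alpha, FABS
   and FNEG are just CPYS/CPYSN with suitable operands. */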
uint64_t helper_cpys(uint64_t a, uint64_t b)
{
    return (a & 0x8000000000000000ULL) | (b & ~0x8000000000000000ULL);
}

uint64_t helper_cpysn(uint64_t a, uint64_t b)
{
    return ((~a) & 0x8000000000000000ULL) | (b & ~0x8000000000000000ULL);
}

uint64_t helper_cpyse(uint64_t a, uint64_t b)
{
    return (a & 0xFFF0000000000000ULL) | (b & ~0xFFF0000000000000ULL);
}

/* Comparisons */
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_is_nan(fa) || float64_is_nan(fb))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpfeq (uint64_t a)
{
    return !(a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfne (uint64_t a)
{
    return (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpflt (uint64_t a)
{
    return (a & 0x8000000000000000ULL) && (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfle (uint64_t a)
{
    return (a & 0x8000000000000000ULL) || !(a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfgt (uint64_t a)
{
    return !(a & 0x8000000000000000ULL) && (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfge (uint64_t a)
{
    return !(a & 0x8000000000000000ULL) || !(a & 0x7FFFFFFFFFFFFFFFULL);
}

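/* The cmpf* helpers above test a floating register value against zero by
   inspecting its bit pattern directly (sign in bit 63, magnitude in the
   remaining bits), so they never raise floating-point exceptions. */
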
/* Floating point format conversion */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvttq (uint64_t a)
{
    float64 fa = t_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_cvtlq (uint64_t a)
{
    return (int64_t)((int32_t)((a >> 32) | ((a >> 29) & 0x3FFFFFFF)));
}

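/* CVTLQ above reassembles a longword from the in-register layout (bits
   <63:62> and <58:29>) and sign-extends it; __helper_cvtql below performs
   the reverse scatter, optionally raising an overflow exception for the
   /V forms. */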
static always_inline uint64_t __helper_cvtql (uint64_t a, int s, int v)
{
    uint64_t r;

    r = ((uint64_t)(a & 0xC0000000)) << 32;
    r |= ((uint64_t)(a & 0x7FFFFFFF)) << 29;

    if (v && (int64_t)((int32_t)r) != (int64_t)r) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO */
    }
    return r;
}

uint64_t helper_cvtql (uint64_t a)
{
    return __helper_cvtql(a, 0, 0);
}

uint64_t helper_cvtqlv (uint64_t a)
{
    return __helper_cvtql(a, 0, 1);
}

uint64_t helper_cvtqlsv (uint64_t a)
{
    return __helper_cvtql(a, 1, 1);
}

/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
void helper_hw_rei (void)
{
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->ipr[IPR_EXC_ADDR] = a & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

uint64_t helper_mfpr (int iprn, uint64_t val)
{
    uint64_t tmp;

    if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
        val = tmp;

    return val;
}

void helper_mtpr (int iprn, uint64_t val)
{
    cpu_alpha_mtpr(env, iprn, val, NULL);
}

void helper_set_alt_mode (void)
{
    env->saved_mode = env->ps & 0xC;
    env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
}

void helper_restore_mode (void)
{
    env->ps = (env->ps & ~0xC) | env->saved_mode;
}

#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

/* XXX: the two following helpers are pure hacks.
 *      Hopefully, we emulate the PALcode, then we should never see
 *      HW_LD / HW_ST instructions.
 */
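/* Both helpers probe the corresponding softmmu TLB entry: on a hit the
   cached addend is applied to the virtual address, and on a miss
   tlb_fill() is called and the lookup is retried via the redo label. */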
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 0, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 1, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

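/* The helpers below wrap the raw/kernel/data load and store accessors and
   provide a simplified load-locked/store-conditional emulation: ld*_l
   records the locked address in env->lock, and st*_c performs the store
   and returns 0 (success) only while that address still matches, then
   invalidates the lock. */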
void helper_ldl_raw(uint64_t t0, uint64_t t1)
{
    ldl_raw(t1, t0);
}

void helper_ldq_raw(uint64_t t0, uint64_t t1)
{
    ldq_raw(t1, t0);
}

void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldl_raw(t1, t0);
}

void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldq_raw(t1, t0);
}

void helper_ldl_kernel(uint64_t t0, uint64_t t1)
{
    ldl_kernel(t1, t0);
}

void helper_ldq_kernel(uint64_t t0, uint64_t t1)
{
    ldq_kernel(t1, t0);
}

void helper_ldl_data(uint64_t t0, uint64_t t1)
{
    ldl_data(t1, t0);
}

void helper_ldq_data(uint64_t t0, uint64_t t1)
{
    ldq_data(t1, t0);
}

void helper_stl_raw(uint64_t t0, uint64_t t1)
{
    stl_raw(t1, t0);
}

void helper_stq_raw(uint64_t t0, uint64_t t1)
{
    stq_raw(t1, t0);
}

uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stl_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stq_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

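/* Each inclusion of softmmu_template.h instantiates the slow-path MMU
   load/store helpers for one access size: SHIFT 0/1/2/3 correspond to
   1-, 2-, 4- and 8-byte accesses. */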
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif