target-alpha: Indicate NORETURN status when raising exception.
[qemu/kevin.git] / target-alpha / op_helper.c
bloba20913033d0cc32254c8b8575d5fb30d7e9647dc
1 /*
2 * Alpha emulation cpu micro-operations helpers for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "exec.h"
21 #include "host-utils.h"
22 #include "softfloat.h"
23 #include "helper.h"
25 /*****************************************************************************/
26 /* Exceptions processing helpers */
27 void QEMU_NORETURN helper_excp (int excp, int error)
29 env->exception_index = excp;
30 env->error_code = error;
31 cpu_loop_exit();
34 uint64_t helper_load_pcc (void)
36 /* XXX: TODO */
37 return 0;
40 uint64_t helper_load_fpcr (void)
42 return cpu_alpha_load_fpcr (env);
45 void helper_store_fpcr (uint64_t val)
47 cpu_alpha_store_fpcr (env, val);
50 uint64_t helper_addqv (uint64_t op1, uint64_t op2)
52 uint64_t tmp = op1;
53 op1 += op2;
54 if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
55 helper_excp(EXCP_ARITH, EXC_M_IOV);
57 return op1;
60 uint64_t helper_addlv (uint64_t op1, uint64_t op2)
62 uint64_t tmp = op1;
63 op1 = (uint32_t)(op1 + op2);
64 if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
65 helper_excp(EXCP_ARITH, EXC_M_IOV);
67 return op1;
70 uint64_t helper_subqv (uint64_t op1, uint64_t op2)
72 uint64_t res;
73 res = op1 - op2;
74 if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
75 helper_excp(EXCP_ARITH, EXC_M_IOV);
77 return res;
80 uint64_t helper_sublv (uint64_t op1, uint64_t op2)
82 uint32_t res;
83 res = op1 - op2;
84 if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
85 helper_excp(EXCP_ARITH, EXC_M_IOV);
87 return res;
90 uint64_t helper_mullv (uint64_t op1, uint64_t op2)
92 int64_t res = (int64_t)op1 * (int64_t)op2;
94 if (unlikely((int32_t)res != res)) {
95 helper_excp(EXCP_ARITH, EXC_M_IOV);
97 return (int64_t)((int32_t)res);
100 uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
102 uint64_t tl, th;
104 muls64(&tl, &th, op1, op2);
105 /* If th != 0 && th != -1, then we had an overflow */
106 if (unlikely((th + 1) > 1)) {
107 helper_excp(EXCP_ARITH, EXC_M_IOV);
109 return tl;
112 uint64_t helper_umulh (uint64_t op1, uint64_t op2)
114 uint64_t tl, th;
116 mulu64(&tl, &th, op1, op2);
117 return th;
120 uint64_t helper_ctpop (uint64_t arg)
122 return ctpop64(arg);
125 uint64_t helper_ctlz (uint64_t arg)
127 return clz64(arg);
130 uint64_t helper_cttz (uint64_t arg)
132 return ctz64(arg);
/* Clear every byte lane of OP whose corresponding bit is set in MSKB.  */
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; i++) {
        if ((mskb >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return op & ~mask;
}

/* ZAP: clear the byte lanes selected by the low 8 bits of MASK.  */
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

/* ZAPNOT: keep only the byte lanes selected by the low 8 bits of MASK.  */
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}
/* CMPBGE: set result bit i when byte i of OP1 is unsigned-greater-or-equal
   to byte i of OP2.  */
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 0; i < 8; i++) {
        uint8_t ba = op1 >> (i * 8);
        uint8_t bb = op2 >> (i * 8);
        if (ba >= bb) {
            res |= 1ull << i;
        }
    }
    return res;
}
/* MINUB8: per-byte unsigned minimum.  */
uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 56; i >= 0; i -= 8) {
        uint8_t a = op1 >> i, b = op2 >> i;
        res |= (uint64_t)(a < b ? a : b) << i;
    }
    return res;
}

/* MINSB8: per-byte signed minimum.  */
uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 56; i >= 0; i -= 8) {
        int8_t a = op1 >> i, b = op2 >> i;
        res |= (uint64_t)(uint8_t)(a < b ? a : b) << i;
    }
    return res;
}

/* MINUW4: per-16-bit-word unsigned minimum.  */
uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 48; i >= 0; i -= 16) {
        uint16_t a = op1 >> i, b = op2 >> i;
        res |= (uint64_t)(a < b ? a : b) << i;
    }
    return res;
}

/* MINSW4: per-16-bit-word signed minimum.  */
uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 48; i >= 0; i -= 16) {
        int16_t a = op1 >> i, b = op2 >> i;
        res |= (uint64_t)(uint16_t)(a < b ? a : b) << i;
    }
    return res;
}
/* MAXUB8: per-byte unsigned maximum.  */
uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 56; i >= 0; i -= 8) {
        uint8_t a = op1 >> i, b = op2 >> i;
        res |= (uint64_t)(a > b ? a : b) << i;
    }
    return res;
}

/* MAXSB8: per-byte signed maximum.  */
uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 56; i >= 0; i -= 8) {
        int8_t a = op1 >> i, b = op2 >> i;
        res |= (uint64_t)(uint8_t)(a > b ? a : b) << i;
    }
    return res;
}

/* MAXUW4: per-16-bit-word unsigned maximum.  */
uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 48; i >= 0; i -= 16) {
        uint16_t a = op1 >> i, b = op2 >> i;
        res |= (uint64_t)(a > b ? a : b) << i;
    }
    return res;
}

/* MAXSW4: per-16-bit-word signed maximum.  */
uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 48; i >= 0; i -= 16) {
        int16_t a = op1 >> i, b = op2 >> i;
        res |= (uint64_t)(uint16_t)(a > b ? a : b) << i;
    }
    return res;
}
/* PERR: sum of absolute differences of the eight byte lanes.  */
uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t sum = 0;
    int i;

    for (i = 0; i < 64; i += 8) {
        uint8_t a = op1 >> i, b = op2 >> i;
        sum += a > b ? a - b : b - a;
    }
    return sum;
}

/* PKLB: pack the low bytes of two longwords into two bytes.  */
uint64_t helper_pklb (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 >> 24) & 0xff00);
}

/* PKWB: pack the low bytes of four words into four bytes.  */
uint64_t helper_pkwb (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 >> 8) & 0xff00)
            | ((op1 >> 16) & 0xff0000)
            | ((op1 >> 24) & 0xff000000));
}

/* UNPKBL: unpack two bytes into the low bytes of two longwords.  */
uint64_t helper_unpkbl (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 & 0xff00) << 24);
}

/* UNPKBW: unpack four bytes into the low bytes of four words.  */
uint64_t helper_unpkbw (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 & 0xff00) << 8)
            | ((op1 & 0xff0000) << 16)
            | ((op1 & 0xff000000) << 24));
}
345 /* Floating point helpers */
347 void helper_setroundmode (uint32_t val)
349 set_float_rounding_mode(val, &FP_STATUS);
352 void helper_setflushzero (uint32_t val)
354 set_flush_to_zero(val, &FP_STATUS);
357 void helper_fp_exc_clear (void)
359 set_float_exception_flags(0, &FP_STATUS);
362 uint32_t helper_fp_exc_get (void)
364 return get_float_exception_flags(&FP_STATUS);
367 /* Raise exceptions for ieee fp insns without software completion.
368 In that case there are no exceptions that don't trap; the mask
369 doesn't apply. */
370 void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
372 if (exc) {
373 uint32_t hw_exc = 0;
375 env->ipr[IPR_EXC_MASK] |= 1ull << regno;
377 if (exc & float_flag_invalid) {
378 hw_exc |= EXC_M_INV;
380 if (exc & float_flag_divbyzero) {
381 hw_exc |= EXC_M_DZE;
383 if (exc & float_flag_overflow) {
384 hw_exc |= EXC_M_FOV;
386 if (exc & float_flag_underflow) {
387 hw_exc |= EXC_M_UNF;
389 if (exc & float_flag_inexact) {
390 hw_exc |= EXC_M_INE;
392 helper_excp(EXCP_ARITH, hw_exc);
396 /* Raise exceptions for ieee fp insns with software completion. */
397 void helper_fp_exc_raise_s(uint32_t exc, uint32_t regno)
399 if (exc) {
400 env->fpcr_exc_status |= exc;
402 exc &= ~env->fpcr_exc_mask;
403 if (exc) {
404 helper_fp_exc_raise(exc, regno);
409 /* Input remapping without software completion. Handle denormal-map-to-zero
410 and trap for all other non-finite numbers. */
411 uint64_t helper_ieee_input(uint64_t val)
413 uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
414 uint64_t frac = val & 0xfffffffffffffull;
416 if (exp == 0) {
417 if (frac != 0) {
418 /* If DNZ is set flush denormals to zero on input. */
419 if (env->fpcr_dnz) {
420 val &= 1ull << 63;
421 } else {
422 helper_excp(EXCP_ARITH, EXC_M_UNF);
425 } else if (exp == 0x7ff) {
426 /* Infinity or NaN. */
427 /* ??? I'm not sure these exception bit flags are correct. I do
428 know that the Linux kernel, at least, doesn't rely on them and
429 just emulates the insn to figure out what exception to use. */
430 helper_excp(EXCP_ARITH, frac ? EXC_M_INV : EXC_M_FOV);
432 return val;
435 /* Similar, but does not trap for infinities. Used for comparisons. */
436 uint64_t helper_ieee_input_cmp(uint64_t val)
438 uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
439 uint64_t frac = val & 0xfffffffffffffull;
441 if (exp == 0) {
442 if (frac != 0) {
443 /* If DNZ is set flush denormals to zero on input. */
444 if (env->fpcr_dnz) {
445 val &= 1ull << 63;
446 } else {
447 helper_excp(EXCP_ARITH, EXC_M_UNF);
450 } else if (exp == 0x7ff && frac) {
451 /* NaN. */
452 helper_excp(EXCP_ARITH, EXC_M_INV);
454 return val;
457 /* Input remapping with software completion enabled. All we have to do
458 is handle denormal-map-to-zero; all other inputs get exceptions as
459 needed from the actual operation. */
460 uint64_t helper_ieee_input_s(uint64_t val)
462 if (env->fpcr_dnz) {
463 uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
464 if (exp == 0) {
465 val &= 1ull << 63;
468 return val;
471 /* F floating (VAX) */
472 static inline uint64_t float32_to_f(float32 fa)
474 uint64_t r, exp, mant, sig;
475 CPU_FloatU a;
477 a.f = fa;
478 sig = ((uint64_t)a.l & 0x80000000) << 32;
479 exp = (a.l >> 23) & 0xff;
480 mant = ((uint64_t)a.l & 0x007fffff) << 29;
482 if (exp == 255) {
483 /* NaN or infinity */
484 r = 1; /* VAX dirty zero */
485 } else if (exp == 0) {
486 if (mant == 0) {
487 /* Zero */
488 r = 0;
489 } else {
490 /* Denormalized */
491 r = sig | ((exp + 1) << 52) | mant;
493 } else {
494 if (exp >= 253) {
495 /* Overflow */
496 r = 1; /* VAX dirty zero */
497 } else {
498 r = sig | ((exp + 2) << 52);
502 return r;
505 static inline float32 f_to_float32(uint64_t a)
507 uint32_t exp, mant_sig;
508 CPU_FloatU r;
510 exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
511 mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);
513 if (unlikely(!exp && mant_sig)) {
514 /* Reserved operands / Dirty zero */
515 helper_excp(EXCP_OPCDEC, 0);
518 if (exp < 3) {
519 /* Underflow */
520 r.l = 0;
521 } else {
522 r.l = ((exp - 2) << 23) | mant_sig;
525 return r.f;
/* F-float register image -> 32-bit memory image (words swapped).  */
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;

    r  = (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}

/* 32-bit memory image -> F-float register image.  Bits 61:59 are set
   when the stored exponent msb (bit 14) is clear.  */
uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;

    r  = ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000)) {
        r |= 0x7ll << 59;
    }
    return r;
}
548 /* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong. We should
549 either implement VAX arithmetic properly or just signal invalid opcode. */
551 uint64_t helper_addf (uint64_t a, uint64_t b)
553 float32 fa, fb, fr;
555 fa = f_to_float32(a);
556 fb = f_to_float32(b);
557 fr = float32_add(fa, fb, &FP_STATUS);
558 return float32_to_f(fr);
561 uint64_t helper_subf (uint64_t a, uint64_t b)
563 float32 fa, fb, fr;
565 fa = f_to_float32(a);
566 fb = f_to_float32(b);
567 fr = float32_sub(fa, fb, &FP_STATUS);
568 return float32_to_f(fr);
571 uint64_t helper_mulf (uint64_t a, uint64_t b)
573 float32 fa, fb, fr;
575 fa = f_to_float32(a);
576 fb = f_to_float32(b);
577 fr = float32_mul(fa, fb, &FP_STATUS);
578 return float32_to_f(fr);
581 uint64_t helper_divf (uint64_t a, uint64_t b)
583 float32 fa, fb, fr;
585 fa = f_to_float32(a);
586 fb = f_to_float32(b);
587 fr = float32_div(fa, fb, &FP_STATUS);
588 return float32_to_f(fr);
591 uint64_t helper_sqrtf (uint64_t t)
593 float32 ft, fr;
595 ft = f_to_float32(t);
596 fr = float32_sqrt(ft, &FP_STATUS);
597 return float32_to_f(fr);
601 /* G floating (VAX) */
602 static inline uint64_t float64_to_g(float64 fa)
604 uint64_t r, exp, mant, sig;
605 CPU_DoubleU a;
607 a.d = fa;
608 sig = a.ll & 0x8000000000000000ull;
609 exp = (a.ll >> 52) & 0x7ff;
610 mant = a.ll & 0x000fffffffffffffull;
612 if (exp == 2047) {
613 /* NaN or infinity */
614 r = 1; /* VAX dirty zero */
615 } else if (exp == 0) {
616 if (mant == 0) {
617 /* Zero */
618 r = 0;
619 } else {
620 /* Denormalized */
621 r = sig | ((exp + 1) << 52) | mant;
623 } else {
624 if (exp >= 2045) {
625 /* Overflow */
626 r = 1; /* VAX dirty zero */
627 } else {
628 r = sig | ((exp + 2) << 52);
632 return r;
635 static inline float64 g_to_float64(uint64_t a)
637 uint64_t exp, mant_sig;
638 CPU_DoubleU r;
640 exp = (a >> 52) & 0x7ff;
641 mant_sig = a & 0x800fffffffffffffull;
643 if (!exp && mant_sig) {
644 /* Reserved operands / Dirty zero */
645 helper_excp(EXCP_OPCDEC, 0);
648 if (exp < 3) {
649 /* Underflow */
650 r.ll = 0;
651 } else {
652 r.ll = ((exp - 2) << 52) | mant_sig;
655 return r.d;
/* G-float register image <-> 64-bit memory image: both directions are
   the same 16-bit word swap, so the two helpers are identical.  */
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;

    r  = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;

    r  = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}
678 uint64_t helper_addg (uint64_t a, uint64_t b)
680 float64 fa, fb, fr;
682 fa = g_to_float64(a);
683 fb = g_to_float64(b);
684 fr = float64_add(fa, fb, &FP_STATUS);
685 return float64_to_g(fr);
688 uint64_t helper_subg (uint64_t a, uint64_t b)
690 float64 fa, fb, fr;
692 fa = g_to_float64(a);
693 fb = g_to_float64(b);
694 fr = float64_sub(fa, fb, &FP_STATUS);
695 return float64_to_g(fr);
698 uint64_t helper_mulg (uint64_t a, uint64_t b)
700 float64 fa, fb, fr;
702 fa = g_to_float64(a);
703 fb = g_to_float64(b);
704 fr = float64_mul(fa, fb, &FP_STATUS);
705 return float64_to_g(fr);
708 uint64_t helper_divg (uint64_t a, uint64_t b)
710 float64 fa, fb, fr;
712 fa = g_to_float64(a);
713 fb = g_to_float64(b);
714 fr = float64_div(fa, fb, &FP_STATUS);
715 return float64_to_g(fr);
718 uint64_t helper_sqrtg (uint64_t a)
720 float64 fa, fr;
722 fa = g_to_float64(a);
723 fr = float64_sqrt(fa, &FP_STATUS);
724 return float64_to_g(fr);
/* S floating (single) */

/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg.  */
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint32_t frac = fi & 0x7fffff;
    uint32_t sign = fi >> 31;
    uint32_t exp_msb = (fi >> 30) & 1;
    uint32_t exp_low = (fi >> 23) & 0x7f;
    uint32_t exp;

    /* Widen the 8-bit IEEE single exponent to the 11-bit field used in
       the register image, saturating Inf/NaN and rebiasing normals.  */
    exp = (exp_msb << 10) | exp_low;
    if (exp_msb) {
        if (exp_low == 0x7f)
            exp = 0x7ff;
    } else {
        if (exp_low != 0x00)
            exp |= 0x380;
    }

    return (((uint64_t)sign << 63)
            | ((uint64_t)exp << 52)
            | ((uint64_t)frac << 29));
}
753 static inline uint64_t float32_to_s(float32 fa)
755 CPU_FloatU a;
756 a.f = fa;
757 return float32_to_s_int(a.l);
/* Narrow a 64-bit S-float register image back to the 32-bit memory
   layout: sign + exponent msb from the top, the rest from bit 29 up.  */
static inline uint32_t s_to_float32_int(uint64_t a)
{
    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
}
765 static inline float32 s_to_float32(uint64_t a)
767 CPU_FloatU r;
768 r.l = s_to_float32_int(a);
769 return r.f;
772 uint32_t helper_s_to_memory (uint64_t a)
774 return s_to_float32_int(a);
777 uint64_t helper_memory_to_s (uint32_t a)
779 return float32_to_s_int(a);
782 uint64_t helper_adds (uint64_t a, uint64_t b)
784 float32 fa, fb, fr;
786 fa = s_to_float32(a);
787 fb = s_to_float32(b);
788 fr = float32_add(fa, fb, &FP_STATUS);
789 return float32_to_s(fr);
792 uint64_t helper_subs (uint64_t a, uint64_t b)
794 float32 fa, fb, fr;
796 fa = s_to_float32(a);
797 fb = s_to_float32(b);
798 fr = float32_sub(fa, fb, &FP_STATUS);
799 return float32_to_s(fr);
802 uint64_t helper_muls (uint64_t a, uint64_t b)
804 float32 fa, fb, fr;
806 fa = s_to_float32(a);
807 fb = s_to_float32(b);
808 fr = float32_mul(fa, fb, &FP_STATUS);
809 return float32_to_s(fr);
812 uint64_t helper_divs (uint64_t a, uint64_t b)
814 float32 fa, fb, fr;
816 fa = s_to_float32(a);
817 fb = s_to_float32(b);
818 fr = float32_div(fa, fb, &FP_STATUS);
819 return float32_to_s(fr);
822 uint64_t helper_sqrts (uint64_t a)
824 float32 fa, fr;
826 fa = s_to_float32(a);
827 fr = float32_sqrt(fa, &FP_STATUS);
828 return float32_to_s(fr);
832 /* T floating (double) */
833 static inline float64 t_to_float64(uint64_t a)
835 /* Memory format is the same as float64 */
836 CPU_DoubleU r;
837 r.ll = a;
838 return r.d;
841 static inline uint64_t float64_to_t(float64 fa)
843 /* Memory format is the same as float64 */
844 CPU_DoubleU r;
845 r.d = fa;
846 return r.ll;
849 uint64_t helper_addt (uint64_t a, uint64_t b)
851 float64 fa, fb, fr;
853 fa = t_to_float64(a);
854 fb = t_to_float64(b);
855 fr = float64_add(fa, fb, &FP_STATUS);
856 return float64_to_t(fr);
859 uint64_t helper_subt (uint64_t a, uint64_t b)
861 float64 fa, fb, fr;
863 fa = t_to_float64(a);
864 fb = t_to_float64(b);
865 fr = float64_sub(fa, fb, &FP_STATUS);
866 return float64_to_t(fr);
869 uint64_t helper_mult (uint64_t a, uint64_t b)
871 float64 fa, fb, fr;
873 fa = t_to_float64(a);
874 fb = t_to_float64(b);
875 fr = float64_mul(fa, fb, &FP_STATUS);
876 return float64_to_t(fr);
879 uint64_t helper_divt (uint64_t a, uint64_t b)
881 float64 fa, fb, fr;
883 fa = t_to_float64(a);
884 fb = t_to_float64(b);
885 fr = float64_div(fa, fb, &FP_STATUS);
886 return float64_to_t(fr);
889 uint64_t helper_sqrtt (uint64_t a)
891 float64 fa, fr;
893 fa = t_to_float64(a);
894 fr = float64_sqrt(fa, &FP_STATUS);
895 return float64_to_t(fr);
898 /* Comparisons */
899 uint64_t helper_cmptun (uint64_t a, uint64_t b)
901 float64 fa, fb;
903 fa = t_to_float64(a);
904 fb = t_to_float64(b);
906 if (float64_is_nan(fa) || float64_is_nan(fb))
907 return 0x4000000000000000ULL;
908 else
909 return 0;
912 uint64_t helper_cmpteq(uint64_t a, uint64_t b)
914 float64 fa, fb;
916 fa = t_to_float64(a);
917 fb = t_to_float64(b);
919 if (float64_eq(fa, fb, &FP_STATUS))
920 return 0x4000000000000000ULL;
921 else
922 return 0;
925 uint64_t helper_cmptle(uint64_t a, uint64_t b)
927 float64 fa, fb;
929 fa = t_to_float64(a);
930 fb = t_to_float64(b);
932 if (float64_le(fa, fb, &FP_STATUS))
933 return 0x4000000000000000ULL;
934 else
935 return 0;
938 uint64_t helper_cmptlt(uint64_t a, uint64_t b)
940 float64 fa, fb;
942 fa = t_to_float64(a);
943 fb = t_to_float64(b);
945 if (float64_lt(fa, fb, &FP_STATUS))
946 return 0x4000000000000000ULL;
947 else
948 return 0;
951 uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
953 float64 fa, fb;
955 fa = g_to_float64(a);
956 fb = g_to_float64(b);
958 if (float64_eq(fa, fb, &FP_STATUS))
959 return 0x4000000000000000ULL;
960 else
961 return 0;
964 uint64_t helper_cmpgle(uint64_t a, uint64_t b)
966 float64 fa, fb;
968 fa = g_to_float64(a);
969 fb = g_to_float64(b);
971 if (float64_le(fa, fb, &FP_STATUS))
972 return 0x4000000000000000ULL;
973 else
974 return 0;
977 uint64_t helper_cmpglt(uint64_t a, uint64_t b)
979 float64 fa, fb;
981 fa = g_to_float64(a);
982 fb = g_to_float64(b);
984 if (float64_lt(fa, fb, &FP_STATUS))
985 return 0x4000000000000000ULL;
986 else
987 return 0;
990 /* Floating point format conversion */
991 uint64_t helper_cvtts (uint64_t a)
993 float64 fa;
994 float32 fr;
996 fa = t_to_float64(a);
997 fr = float64_to_float32(fa, &FP_STATUS);
998 return float32_to_s(fr);
1001 uint64_t helper_cvtst (uint64_t a)
1003 float32 fa;
1004 float64 fr;
1006 fa = s_to_float32(a);
1007 fr = float32_to_float64(fa, &FP_STATUS);
1008 return float64_to_t(fr);
1011 uint64_t helper_cvtqs (uint64_t a)
1013 float32 fr = int64_to_float32(a, &FP_STATUS);
1014 return float32_to_s(fr);
1017 /* Implement float64 to uint64 conversion without saturation -- we must
1018 supply the truncated result. This behaviour is used by the compiler
1019 to get unsigned conversion for free with the same instruction.
1021 The VI flag is set when overflow or inexact exceptions should be raised. */
1023 static inline uint64_t helper_cvttq_internal(uint64_t a, int roundmode, int VI)
1025 uint64_t frac, ret = 0;
1026 uint32_t exp, sign, exc = 0;
1027 int shift;
1029 sign = (a >> 63);
1030 exp = (uint32_t)(a >> 52) & 0x7ff;
1031 frac = a & 0xfffffffffffffull;
1033 if (exp == 0) {
1034 if (unlikely(frac != 0)) {
1035 goto do_underflow;
1037 } else if (exp == 0x7ff) {
1038 exc = (frac ? float_flag_invalid : VI ? float_flag_overflow : 0);
1039 } else {
1040 /* Restore implicit bit. */
1041 frac |= 0x10000000000000ull;
1043 shift = exp - 1023 - 52;
1044 if (shift >= 0) {
1045 /* In this case the number is so large that we must shift
1046 the fraction left. There is no rounding to do. */
1047 if (shift < 63) {
1048 ret = frac << shift;
1049 if (VI && (ret >> shift) != frac) {
1050 exc = float_flag_overflow;
1053 } else {
1054 uint64_t round;
1056 /* In this case the number is smaller than the fraction as
1057 represented by the 52 bit number. Here we must think
1058 about rounding the result. Handle this by shifting the
1059 fractional part of the number into the high bits of ROUND.
1060 This will let us efficiently handle round-to-nearest. */
1061 shift = -shift;
1062 if (shift < 63) {
1063 ret = frac >> shift;
1064 round = frac << (64 - shift);
1065 } else {
1066 /* The exponent is so small we shift out everything.
1067 Leave a sticky bit for proper rounding below. */
1068 do_underflow:
1069 round = 1;
1072 if (round) {
1073 exc = (VI ? float_flag_inexact : 0);
1074 switch (roundmode) {
1075 case float_round_nearest_even:
1076 if (round == (1ull << 63)) {
1077 /* Fraction is exactly 0.5; round to even. */
1078 ret += (ret & 1);
1079 } else if (round > (1ull << 63)) {
1080 ret += 1;
1082 break;
1083 case float_round_to_zero:
1084 break;
1085 case float_round_up:
1086 ret += 1 - sign;
1087 break;
1088 case float_round_down:
1089 ret += sign;
1090 break;
1094 if (sign) {
1095 ret = -ret;
1098 if (unlikely(exc)) {
1099 float_raise(exc, &FP_STATUS);
1102 return ret;
1105 uint64_t helper_cvttq(uint64_t a)
1107 return helper_cvttq_internal(a, FP_STATUS.float_rounding_mode, 1);
1110 uint64_t helper_cvttq_c(uint64_t a)
1112 return helper_cvttq_internal(a, float_round_to_zero, 0);
1115 uint64_t helper_cvttq_svic(uint64_t a)
1117 return helper_cvttq_internal(a, float_round_to_zero, 1);
1120 uint64_t helper_cvtqt (uint64_t a)
1122 float64 fr = int64_to_float64(a, &FP_STATUS);
1123 return float64_to_t(fr);
1126 uint64_t helper_cvtqf (uint64_t a)
1128 float32 fr = int64_to_float32(a, &FP_STATUS);
1129 return float32_to_f(fr);
1132 uint64_t helper_cvtgf (uint64_t a)
1134 float64 fa;
1135 float32 fr;
1137 fa = g_to_float64(a);
1138 fr = float64_to_float32(fa, &FP_STATUS);
1139 return float32_to_f(fr);
1142 uint64_t helper_cvtgq (uint64_t a)
1144 float64 fa = g_to_float64(a);
1145 return float64_to_int64_round_to_zero(fa, &FP_STATUS);
1148 uint64_t helper_cvtqg (uint64_t a)
1150 float64 fr;
1151 fr = int64_to_float64(a, &FP_STATUS);
1152 return float64_to_g(fr);
1155 /* PALcode support special instructions */
1156 #if !defined (CONFIG_USER_ONLY)
1157 void helper_hw_rei (void)
1159 env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
1160 env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
1161 env->intr_flag = 0;
1162 /* XXX: re-enable interrupts and memory mapping */
1165 void helper_hw_ret (uint64_t a)
1167 env->pc = a & ~3;
1168 env->ipr[IPR_EXC_ADDR] = a & 1;
1169 env->intr_flag = 0;
1170 /* XXX: re-enable interrupts and memory mapping */
1173 uint64_t helper_mfpr (int iprn, uint64_t val)
1175 uint64_t tmp;
1177 if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
1178 val = tmp;
1180 return val;
1183 void helper_mtpr (int iprn, uint64_t val)
1185 cpu_alpha_mtpr(env, iprn, val, NULL);
1188 void helper_set_alt_mode (void)
1190 env->saved_mode = env->ps & 0xC;
1191 env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
1194 void helper_restore_mode (void)
1196 env->ps = (env->ps & ~0xC) | env->saved_mode;
1199 #endif
1201 /*****************************************************************************/
1202 /* Softmmu support */
1203 #if !defined (CONFIG_USER_ONLY)
1205 /* XXX: the two following helpers are pure hacks.
1206 * Hopefully, we emulate the PALcode, then we should never see
1207 * HW_LD / HW_ST instructions.
1209 uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
1211 uint64_t tlb_addr, physaddr;
1212 int index, mmu_idx;
1213 void *retaddr;
1215 mmu_idx = cpu_mmu_index(env);
1216 index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1217 redo:
1218 tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
1219 if ((virtaddr & TARGET_PAGE_MASK) ==
1220 (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1221 physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
1222 } else {
1223 /* the page is not in the TLB : fill it */
1224 retaddr = GETPC();
1225 tlb_fill(virtaddr, 0, mmu_idx, retaddr);
1226 goto redo;
1228 return physaddr;
1231 uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
1233 uint64_t tlb_addr, physaddr;
1234 int index, mmu_idx;
1235 void *retaddr;
1237 mmu_idx = cpu_mmu_index(env);
1238 index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1239 redo:
1240 tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
1241 if ((virtaddr & TARGET_PAGE_MASK) ==
1242 (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1243 physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
1244 } else {
1245 /* the page is not in the TLB : fill it */
1246 retaddr = GETPC();
1247 tlb_fill(virtaddr, 1, mmu_idx, retaddr);
1248 goto redo;
1250 return physaddr;
1253 void helper_ldl_raw(uint64_t t0, uint64_t t1)
1255 ldl_raw(t1, t0);
1258 void helper_ldq_raw(uint64_t t0, uint64_t t1)
1260 ldq_raw(t1, t0);
1263 void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
1265 env->lock = t1;
1266 ldl_raw(t1, t0);
1269 void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
1271 env->lock = t1;
1272 ldl_raw(t1, t0);
1275 void helper_ldl_kernel(uint64_t t0, uint64_t t1)
1277 ldl_kernel(t1, t0);
1280 void helper_ldq_kernel(uint64_t t0, uint64_t t1)
1282 ldq_kernel(t1, t0);
1285 void helper_ldl_data(uint64_t t0, uint64_t t1)
1287 ldl_data(t1, t0);
1290 void helper_ldq_data(uint64_t t0, uint64_t t1)
1292 ldq_data(t1, t0);
1295 void helper_stl_raw(uint64_t t0, uint64_t t1)
1297 stl_raw(t1, t0);
1300 void helper_stq_raw(uint64_t t0, uint64_t t1)
1302 stq_raw(t1, t0);
1305 uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
1307 uint64_t ret;
1309 if (t1 == env->lock) {
1310 stl_raw(t1, t0);
1311 ret = 0;
1312 } else
1313 ret = 1;
1315 env->lock = 1;
1317 return ret;
1320 uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
1322 uint64_t ret;
1324 if (t1 == env->lock) {
1325 stq_raw(t1, t0);
1326 ret = 0;
1327 } else
1328 ret = 1;
1330 env->lock = 1;
1332 return ret;
1335 #define MMUSUFFIX _mmu
1337 #define SHIFT 0
1338 #include "softmmu_template.h"
1340 #define SHIFT 1
1341 #include "softmmu_template.h"
1343 #define SHIFT 2
1344 #include "softmmu_template.h"
1346 #define SHIFT 3
1347 #include "softmmu_template.h"
1349 /* try to fill the TLB and return an exception if error. If retaddr is
1350 NULL, it means that the function was called in C code (i.e. not
1351 from generated code or from helper.c) */
1352 /* XXX: fix it to restore all registers */
1353 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
1355 TranslationBlock *tb;
1356 CPUState *saved_env;
1357 unsigned long pc;
1358 int ret;
1360 /* XXX: hack to restore env in all cases, even if not called from
1361 generated code */
1362 saved_env = env;
1363 env = cpu_single_env;
1364 ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
1365 if (!likely(ret == 0)) {
1366 if (likely(retaddr)) {
1367 /* now we have a real cpu fault */
1368 pc = (unsigned long)retaddr;
1369 tb = tb_find_pc(pc);
1370 if (likely(tb)) {
1371 /* the PC is inside the translated code. It means that we have
1372 a virtual CPU fault */
1373 cpu_restore_state(tb, env, pc, NULL);
1376 /* Exception index and error code are already set */
1377 cpu_loop_exit();
1379 env = saved_env;
1382 #endif