target-alpha/op_helper.c
/*
 *  Alpha emulation cpu micro-operations helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "exec.h"
#include "host-utils.h"
#include "softfloat.h"
#include "helper.h"

/*****************************************************************************/
/* Exceptions processing helpers */
void QEMU_NORETURN helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}

uint64_t helper_load_pcc (void)
{
    /* XXX: TODO */
    return 0;
}

uint64_t helper_load_fpcr (void)
{
    return cpu_alpha_load_fpcr (env);
}

void helper_store_fpcr (uint64_t val)
{
    cpu_alpha_store_fpcr (env, val);
}

static spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;

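/* RS and RC read the interrupt flag and then set or clear it as one
   atomic operation; the spinlock keeps the read-modify-write sequence
   consistent across CPU threads.  */
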
uint64_t helper_rs(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 1;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}

uint64_t helper_rc(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 0;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}

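/* The signed-overflow checks below use the standard two's-complement
   bit trick: for addition, overflow occurred iff both operands have the
   same sign and the result's sign differs, i.e. ~(a ^ b) & (a ^ sum)
   has its top bit set; for subtraction the operand signs must differ
   instead, hence (a ^ b) & (a ^ diff).  */
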
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 += op2;
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return op1;
}

uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return op1;
}

uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return res;
}

uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint32_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return res;
}

uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return (int64_t)((int32_t)res);
}

uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    muls64(&tl, &th, op1, op2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (unlikely((th + 1) > 1)) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return tl;
}

uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}

uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}

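/* Build a 64-bit mask with byte lane i set to 0xff whenever bit i of
   mskb is set, then clear exactly those bytes of op.  For example,
   byte_zap(0x1122334455667788, 0x01) yields 0x1122334455667700.  */
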
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}

uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}

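/* CMPBGE compares the eight byte lanes in parallel and returns a bit
   vector: bit i is set iff byte i of op1 is (unsigned) >= byte i of
   op2.  A classic use is cmpbge(0, x), which flags the zero bytes of x
   and so finds a C string terminator eight bytes at a time.  */
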
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    return res;
}

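/* Byte and word minimum/maximum from the MVI multimedia extension:
   eight independent byte lanes (the *b8 forms) or four 16-bit word
   lanes (the *w4 forms), each in unsigned and signed variants.  */
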
uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

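/* PERR ("pixel error", also MVI) sums the absolute differences of the
   eight byte lanes -- the inner loop of video motion estimation.  */
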
uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            opr = opa - opb;
        else
            opr = opb - opa;
        res += opr;
    }
    return res;
}

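/* PKLB/PKWB pack the low byte of each 32-bit (resp. 16-bit) lane into
   the low part of the result; UNPKBL/UNPKBW perform the inverse
   expansion, zero-extending each byte back into its lane.  */
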
uint64_t helper_pklb (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 >> 24) & 0xff00);
}

uint64_t helper_pkwb (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 >> 8) & 0xff00)
            | ((op1 >> 16) & 0xff0000)
            | ((op1 >> 24) & 0xff000000));
}

uint64_t helper_unpkbl (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 & 0xff00) << 24);
}

uint64_t helper_unpkbw (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 & 0xff00) << 8)
            | ((op1 & 0xff0000) << 16)
            | ((op1 & 0xff000000) << 24));
}

/* Floating point helpers */

void helper_setroundmode (uint32_t val)
{
    set_float_rounding_mode(val, &FP_STATUS);
}

void helper_setflushzero (uint32_t val)
{
    set_flush_to_zero(val, &FP_STATUS);
}

void helper_fp_exc_clear (void)
{
    set_float_exception_flags(0, &FP_STATUS);
}

uint32_t helper_fp_exc_get (void)
{
    return get_float_exception_flags(&FP_STATUS);
}

/* Raise exceptions for ieee fp insns without software completion.
   In that case there are no exceptions that don't trap; the mask
   doesn't apply.  */
void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
{
    if (exc) {
        uint32_t hw_exc = 0;

        env->ipr[IPR_EXC_MASK] |= 1ull << regno;

        if (exc & float_flag_invalid) {
            hw_exc |= EXC_M_INV;
        }
        if (exc & float_flag_divbyzero) {
            hw_exc |= EXC_M_DZE;
        }
        if (exc & float_flag_overflow) {
            hw_exc |= EXC_M_FOV;
        }
        if (exc & float_flag_underflow) {
            hw_exc |= EXC_M_UNF;
        }
        if (exc & float_flag_inexact) {
            hw_exc |= EXC_M_INE;
        }
        helper_excp(EXCP_ARITH, hw_exc);
    }
}

/* Raise exceptions for ieee fp insns with software completion.  */
void helper_fp_exc_raise_s(uint32_t exc, uint32_t regno)
{
    if (exc) {
        env->fpcr_exc_status |= exc;

        exc &= ~env->fpcr_exc_mask;
        if (exc) {
            helper_fp_exc_raise(exc, regno);
        }
    }
}

/* Input remapping without software completion.  Handle denormal-map-to-zero
   and trap for all other non-finite numbers.  */
uint64_t helper_ieee_input(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;
            } else {
                helper_excp(EXCP_ARITH, EXC_M_UNF);
            }
        }
    } else if (exp == 0x7ff) {
        /* Infinity or NaN.  */
        /* ??? I'm not sure these exception bit flags are correct.  I do
           know that the Linux kernel, at least, doesn't rely on them and
           just emulates the insn to figure out what exception to use.  */
        helper_excp(EXCP_ARITH, frac ? EXC_M_INV : EXC_M_FOV);
    }
    return val;
}

/* Similar, but does not trap for infinities.  Used for comparisons.  */
uint64_t helper_ieee_input_cmp(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;
            } else {
                helper_excp(EXCP_ARITH, EXC_M_UNF);
            }
        }
    } else if (exp == 0x7ff && frac) {
        /* NaN.  */
        helper_excp(EXCP_ARITH, EXC_M_INV);
    }
    return val;
}

/* Input remapping with software completion enabled.  All we have to do
   is handle denormal-map-to-zero; all other inputs get exceptions as
   needed from the actual operation.  */
uint64_t helper_ieee_input_s(uint64_t val)
{
    if (env->fpcr_dnz) {
        uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
        if (exp == 0) {
            val &= 1ull << 63;
        }
    }
    return val;
}

/* F floating (VAX) */
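/* A note on the +2 exponent adjustments below: IEEE single precision
   uses bias 127 with an implicit 1.f fraction, while VAX F uses bias
   128 with an implicit 0.1f fraction, so representing the same value
   needs a VAX exponent two larger than the IEEE one.  */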
static inline uint64_t float32_to_f(float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static inline float32 f_to_float32(uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}

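/* VAX floats are stored in memory as a sequence of 16-bit words in
   PDP-11 order, so moving between memory and register format is a
   word permutation rather than a straight byte swap.  */
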
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;
    r  = (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;
    r  = ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000))
        r |= 0x7ll << 59;
    return r;
}

/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong.  We should
   either implement VAX arithmetic properly or just signal invalid opcode.  */

uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}

/* G floating (VAX) */
static inline uint64_t float64_to_g(float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static inline float64 g_to_float64(uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}

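/* The quadword word swap below is its own inverse, which is why
   g_to_memory and memory_to_g can share the same permutation.  */
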
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;
    r  = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;
    r  = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}

/* S floating (single) */

/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg.  */
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint32_t frac = fi & 0x7fffff;
    uint32_t sign = fi >> 31;
    uint32_t exp_msb = (fi >> 30) & 1;
    uint32_t exp_low = (fi >> 23) & 0x7f;
    uint32_t exp;

    exp = (exp_msb << 10) | exp_low;
    if (exp_msb) {
        if (exp_low == 0x7f)
            exp = 0x7ff;
    } else {
        if (exp_low != 0x00)
            exp |= 0x380;
    }

    return (((uint64_t)sign << 63)
            | ((uint64_t)exp << 52)
            | ((uint64_t)frac << 29));
}

static inline uint64_t float32_to_s(float32 fa)
{
    CPU_FloatU a;
    a.f = fa;
    return float32_to_s_int(a.l);
}

static inline uint32_t s_to_float32_int(uint64_t a)
{
    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
}

static inline float32 s_to_float32(uint64_t a)
{
    CPU_FloatU r;
    r.l = s_to_float32_int(a);
    return r.f;
}

uint32_t helper_s_to_memory (uint64_t a)
{
    return s_to_float32_int(a);
}

uint64_t helper_memory_to_s (uint32_t a)
{
    return float32_to_s_int(a);
}

uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}

/* T floating (double) */
static inline float64 t_to_float64(uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

static inline uint64_t float64_to_t(float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}

uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}

/* Sign copy */
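/* CPYS, CPYSN and CPYSE work on the raw T-format bit patterns and never
   raise FP exceptions; CPYSE transfers the sign together with the
   11-bit exponent, i.e. the top 12 bits.  */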
uint64_t helper_cpys(uint64_t a, uint64_t b)
{
    return (a & 0x8000000000000000ULL) | (b & ~0x8000000000000000ULL);
}

uint64_t helper_cpysn(uint64_t a, uint64_t b)
{
    return ((~a) & 0x8000000000000000ULL) | (b & ~0x8000000000000000ULL);
}

uint64_t helper_cpyse(uint64_t a, uint64_t b)
{
    return (a & 0xFFF0000000000000ULL) | (b & ~0xFFF0000000000000ULL);
}

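/* FP comparisons return the T-format encoding of 2.0 (bit 62 set) for
   true and 0.0 for false.  */
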
/* Comparisons */
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_is_nan(fa) || float64_is_nan(fb))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* Floating point format conversion */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

/* Implement float64 to uint64 conversion without saturation -- we must
   supply the truncated result.  This behaviour is used by the compiler
   to get unsigned conversion for free with the same instruction.

   The VI flag is set when overflow or inexact exceptions should be
   raised.  */

static inline uint64_t helper_cvttq_internal(uint64_t a, int roundmode, int VI)
{
    uint64_t frac, ret = 0;
    uint32_t exp, sign, exc = 0;
    int shift;

    sign = (a >> 63);
    exp = (uint32_t)(a >> 52) & 0x7ff;
    frac = a & 0xfffffffffffffull;

    if (exp == 0) {
        if (unlikely(frac != 0)) {
            goto do_underflow;
        }
    } else if (exp == 0x7ff) {
        exc = (frac ? float_flag_invalid : VI ? float_flag_overflow : 0);
    } else {
        /* Restore implicit bit.  */
        frac |= 0x10000000000000ull;

        shift = exp - 1023 - 52;
        if (shift >= 0) {
            /* In this case the number is so large that we must shift
               the fraction left.  There is no rounding to do.  */
            if (shift < 63) {
                ret = frac << shift;
                if (VI && (ret >> shift) != frac) {
                    exc = float_flag_overflow;
                }
            }
        } else {
            uint64_t round;

            /* In this case the number is smaller than the fraction as
               represented by the 52 bit number.  Here we must think
               about rounding the result.  Handle this by shifting the
               fractional part of the number into the high bits of ROUND.
               This will let us efficiently handle round-to-nearest.  */
            shift = -shift;
            if (shift < 63) {
                ret = frac >> shift;
                round = frac << (64 - shift);
            } else {
                /* The exponent is so small we shift out everything.
                   Leave a sticky bit for proper rounding below.  */
            do_underflow:
                round = 1;
            }

            if (round) {
                exc = (VI ? float_flag_inexact : 0);
                switch (roundmode) {
                case float_round_nearest_even:
                    if (round == (1ull << 63)) {
                        /* Fraction is exactly 0.5; round to even.  */
                        ret += (ret & 1);
                    } else if (round > (1ull << 63)) {
                        ret += 1;
                    }
                    break;
                case float_round_to_zero:
                    break;
                case float_round_up:
                    ret += 1 - sign;
                    break;
                case float_round_down:
                    ret += sign;
                    break;
                }
            }
        }
    }
    if (sign) {
        ret = -ret;
    }
    if (unlikely(exc)) {
        float_raise(exc, &FP_STATUS);
    }

    return ret;
}

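/* The three public entry points differ only in rounding mode and in
   whether overflow/inexact get reported: plain CVTTQ uses the dynamic
   rounding mode and reports both, the chopped /C form truncates
   silently, and /SVIC truncates but still reports, leaving software
   completion to finish the job.  */
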
uint64_t helper_cvttq(uint64_t a)
{
    return helper_cvttq_internal(a, FP_STATUS.float_rounding_mode, 1);
}

uint64_t helper_cvttq_c(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 0);
}

uint64_t helper_cvttq_svic(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 1);
}

uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_cvtlq (uint64_t a)
{
    int32_t lo = a >> 29;
    int32_t hi = a >> 32;
    return (lo & 0x3FFFFFFF) | (hi & 0xc0000000);
}

uint64_t helper_cvtql (uint64_t a)
{
    return ((a & 0xC0000000) << 32) | ((a & 0x7FFFFFFF) << 29);
}

uint64_t helper_cvtql_v (uint64_t a)
{
    if ((int32_t)a != (int64_t)a)
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    return helper_cvtql(a);
}

uint64_t helper_cvtql_sv (uint64_t a)
{
    /* ??? I'm pretty sure there's nothing that /sv needs to do that /v
       doesn't do.  The only thing I can think is that /sv is a valid
       instruction merely for completeness in the ISA.  */
    return helper_cvtql_v(a);
}

/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
void helper_hw_rei (void)
{
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->ipr[IPR_EXC_ADDR] = a & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

uint64_t helper_mfpr (int iprn, uint64_t val)
{
    uint64_t tmp;

    if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
        val = tmp;

    return val;
}

void helper_mtpr (int iprn, uint64_t val)
{
    cpu_alpha_mtpr(env, iprn, val, NULL);
}

void helper_set_alt_mode (void)
{
    env->saved_mode = env->ps & 0xC;
    env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
}

void helper_restore_mode (void)
{
    env->ps = (env->ps & ~0xC) | env->saved_mode;
}

#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

/* XXX: the two following helpers are pure hacks.
 *      Once we emulate the PALcode properly, we should never see
 *      HW_LD / HW_ST instructions.
 */
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 0, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 1, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

void helper_ldl_raw(uint64_t t0, uint64_t t1)
{
    ldl_raw(t1, t0);
}

void helper_ldq_raw(uint64_t t0, uint64_t t1)
{
    ldq_raw(t1, t0);
}

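/* Load-locked records the locked address in env->lock; the matching
   store-conditional further down succeeds only while that address is
   still in place.  */
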
void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldl_raw(t1, t0);
}

void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldq_raw(t1, t0);
}

void helper_ldl_kernel(uint64_t t0, uint64_t t1)
{
    ldl_kernel(t1, t0);
}

void helper_ldq_kernel(uint64_t t0, uint64_t t1)
{
    ldq_kernel(t1, t0);
}

void helper_ldl_data(uint64_t t0, uint64_t t1)
{
    ldl_data(t1, t0);
}

void helper_ldq_data(uint64_t t0, uint64_t t1)
{
    ldq_data(t1, t0);
}

void helper_stl_raw(uint64_t t0, uint64_t t1)
{
    stl_raw(t1, t0);
}

void helper_stq_raw(uint64_t t0, uint64_t t1)
{
    stq_raw(t1, t0);
}

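/* Store-conditional: returns 0 on success, 1 on failure.  Either way
   the lock is reset to 1, an unaligned value that should never match a
   real locked address, so a second store-conditional without a fresh
   load-locked always fails.  */
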
uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stl_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stq_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

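/* Each inclusion of softmmu_template.h instantiates the slow-path load
   and store helpers for one access size: SHIFT 0..3 gives byte, word,
   longword and quadword accessors.  */
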
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* Try to fill the TLB and raise an exception on error.  If retaddr is
   NULL, it means the function was called from C code (i.e. not from
   generated code or from helper.c).  */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code.  It means that we
                   have a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif