target-alpha: Implement do_interrupt for system mode.
[qemu/kevin.git] / target-alpha / op_helper.c
blobfc5020ad2435dcb58bbc21243faba7f7aca99fd0
1 /*
2 * Alpha emulation cpu micro-operations helpers for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "exec.h"
21 #include "host-utils.h"
22 #include "softfloat.h"
23 #include "helper.h"
24 #include "qemu-timer.h"
26 /*****************************************************************************/
27 /* Exceptions processing helpers */
/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.  */
void QEMU_NORETURN helper_excp(int excp, int error)
{
    /* Record the exception and longjmp back to the main cpu loop;
       never returns to generated code.  */
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}
/* Re-synchronize guest CPU state (PC etc.) from the host return address
   of a helper invoked from translated code.  A NULL RETADDR means the
   call did not come from a TB, so there is nothing to restore.  */
static void do_restore_state(void *retaddr)
{
    unsigned long pc = (unsigned long)retaddr;

    if (pc) {
        TranslationBlock *tb = tb_find_pc(pc);
        if (tb) {
            cpu_restore_state(tb, env, pc);
        }
    }
}
/* This may be called from any of the helpers to set up EXCEPTION_INDEX.  */
static void QEMU_NORETURN dynamic_excp(int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    /* GETPC() locates the calling TB so the exception is precise.  */
    do_restore_state(GETPC());
    cpu_loop_exit();
}
/* Raise an arithmetic trap.  EXC is the exception summary reported in
   trap_arg0; MASK is the destination-register write mask in trap_arg1.  */
static void QEMU_NORETURN arith_excp(int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(EXCP_ARITH, 0);
}
/* RPCC: read the (32-bit) process cycle counter.  */
uint64_t helper_load_pcc (void)
{
    /* ??? This isn't a timer for which we have any rate info.  */
    return (uint32_t)cpu_get_real_ticks();
}

/* Read the floating-point control register image.  */
uint64_t helper_load_fpcr (void)
{
    return cpu_alpha_load_fpcr (env);
}

/* Write the floating-point control register image.  */
void helper_store_fpcr (uint64_t val)
{
    cpu_alpha_store_fpcr (env, val);
}
82 uint64_t helper_addqv (uint64_t op1, uint64_t op2)
84 uint64_t tmp = op1;
85 op1 += op2;
86 if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
87 arith_excp(EXC_M_IOV, 0);
89 return op1;
92 uint64_t helper_addlv (uint64_t op1, uint64_t op2)
94 uint64_t tmp = op1;
95 op1 = (uint32_t)(op1 + op2);
96 if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
97 arith_excp(EXC_M_IOV, 0);
99 return op1;
102 uint64_t helper_subqv (uint64_t op1, uint64_t op2)
104 uint64_t res;
105 res = op1 - op2;
106 if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
107 arith_excp(EXC_M_IOV, 0);
109 return res;
112 uint64_t helper_sublv (uint64_t op1, uint64_t op2)
114 uint32_t res;
115 res = op1 - op2;
116 if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
117 arith_excp(EXC_M_IOV, 0);
119 return res;
122 uint64_t helper_mullv (uint64_t op1, uint64_t op2)
124 int64_t res = (int64_t)op1 * (int64_t)op2;
126 if (unlikely((int32_t)res != res)) {
127 arith_excp(EXC_M_IOV, 0);
129 return (int64_t)((int32_t)res);
132 uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
134 uint64_t tl, th;
136 muls64(&tl, &th, op1, op2);
137 /* If th != 0 && th != -1, then we had an overflow */
138 if (unlikely((th + 1) > 1)) {
139 arith_excp(EXC_M_IOV, 0);
141 return tl;
/* UMULH: high 64 bits of the unsigned 128-bit product.  */
uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t lo, hi;

    mulu64(&lo, &hi, op1, op2);
    return hi;
}
/* CTPOP: population count.  */
uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

/* CTLZ: count leading zeros.  */
uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

/* CTTZ: count trailing zeros.  */
uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}
/* Clear byte i of OP for every set bit i in the 8-bit mask MSKB.  */
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t keep = 0;
    int i;

    for (i = 0; i < 8; i++) {
        if (!((mskb >> i) & 1)) {
            keep |= 0xffull << (i * 8);
        }
    }
    return op & keep;
}
/* ZAP: clear the bytes of VAL selected by the low 8 bits of MASK.  */
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

/* ZAPNOT: clear the bytes of VAL NOT selected by MASK.  */
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}
/* CMPBGE: bit i of the result is set when byte i of OP1 is unsigned->=
   byte i of OP2.  */
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    for (i = 7; i >= 0; --i) {
        uint8_t a = op1 >> (i * 8);
        uint8_t b = op2 >> (i * 8);
        res = (res << 1) | (a >= b);
    }
    return res;
}
/* MINUB8: per-byte unsigned minimum.  */
uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t result = 0;
    int i;

    for (i = 7; i >= 0; --i) {
        uint8_t a = op1 >> (i * 8);
        uint8_t b = op2 >> (i * 8);
        result = (result << 8) | (a < b ? a : b);
    }
    return result;
}
/* MINSB8: per-byte signed minimum.  */
uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t result = 0;
    int i;

    for (i = 7; i >= 0; --i) {
        int8_t a = op1 >> (i * 8);
        int8_t b = op2 >> (i * 8);
        uint8_t m = (a < b) ? a : b;
        result = (result << 8) | m;
    }
    return result;
}
/* MINUW4: per-word (16-bit) unsigned minimum.  */
uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t result = 0;
    int w;

    for (w = 3; w >= 0; --w) {
        uint16_t a = op1 >> (w * 16);
        uint16_t b = op2 >> (w * 16);
        result = (result << 16) | (a < b ? a : b);
    }
    return result;
}
/* MINSW4: per-word (16-bit) signed minimum.  */
uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t result = 0;
    int w;

    for (w = 3; w >= 0; --w) {
        int16_t a = op1 >> (w * 16);
        int16_t b = op2 >> (w * 16);
        uint16_t m = (a < b) ? a : b;
        result = (result << 16) | m;
    }
    return result;
}
/* MAXUB8: per-byte unsigned maximum.  */
uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t result = 0;
    int i;

    for (i = 7; i >= 0; --i) {
        uint8_t a = op1 >> (i * 8);
        uint8_t b = op2 >> (i * 8);
        result = (result << 8) | (a > b ? a : b);
    }
    return result;
}
/* MAXSB8: per-byte signed maximum.  */
uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t result = 0;
    int i;

    for (i = 7; i >= 0; --i) {
        int8_t a = op1 >> (i * 8);
        int8_t b = op2 >> (i * 8);
        uint8_t m = (a > b) ? a : b;
        result = (result << 8) | m;
    }
    return result;
}
/* MAXUW4: per-word (16-bit) unsigned maximum.  */
uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t result = 0;
    int w;

    for (w = 3; w >= 0; --w) {
        uint16_t a = op1 >> (w * 16);
        uint16_t b = op2 >> (w * 16);
        result = (result << 16) | (a > b ? a : b);
    }
    return result;
}
/* MAXSW4: per-word (16-bit) signed maximum.  */
uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t result = 0;
    int w;

    for (w = 3; w >= 0; --w) {
        int16_t a = op1 >> (w * 16);
        int16_t b = op2 >> (w * 16);
        uint16_t m = (a > b) ? a : b;
        result = (result << 16) | m;
    }
    return result;
}
/* PERR: sum of absolute differences of the eight byte pairs.  */
uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t sum = 0;
    int sh;

    for (sh = 0; sh < 64; sh += 8) {
        uint8_t a = op1 >> sh;
        uint8_t b = op2 >> sh;
        sum += (a >= b) ? (uint8_t)(a - b) : (uint8_t)(b - a);
    }
    return sum;
}
/* PKLB: pack the low byte of each longword into result bytes 0 and 1.  */
uint64_t helper_pklb (uint64_t op1)
{
    uint64_t b0 = op1 & 0xff;
    uint64_t b1 = (op1 >> 32) & 0xff;
    return b0 | (b1 << 8);
}
/* PKWB: pack the low byte of each of the four words into bytes 0-3.  */
uint64_t helper_pkwb (uint64_t op1)
{
    uint64_t r = 0;
    int i;

    for (i = 3; i >= 0; --i) {
        r = (r << 8) | (uint8_t)(op1 >> (i * 16));
    }
    return r;
}
/* UNPKBL: unpack bytes 0 and 1 into the low bytes of two longwords.  */
uint64_t helper_unpkbl (uint64_t op1)
{
    uint64_t lo = op1 & 0xff;
    uint64_t hi = (op1 >> 8) & 0xff;
    return lo | (hi << 32);
}
/* UNPKBW: unpack bytes 0-3 into the low bytes of four words.  */
uint64_t helper_unpkbw (uint64_t op1)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 4; ++i) {
        r |= ((op1 >> (i * 8)) & 0xffull) << (i * 16);
    }
    return r;
}
/* Floating point helpers */

/* Set the softfloat rounding mode (a float_round_* constant).  */
void helper_setroundmode (uint32_t val)
{
    set_float_rounding_mode(val, &FP_STATUS);
}

/* Enable or disable flush-to-zero in the softfloat status.  */
void helper_setflushzero (uint32_t val)
{
    set_flush_to_zero(val, &FP_STATUS);
}

/* Clear all accumulated softfloat exception flags.  */
void helper_fp_exc_clear (void)
{
    set_float_exception_flags(0, &FP_STATUS);
}

/* Read the accumulated softfloat exception flags (float_flag_* bits).  */
uint32_t helper_fp_exc_get (void)
{
    return get_float_exception_flags(&FP_STATUS);
}
/* Raise exceptions for ieee fp insns without software completion.
   In that case there are no exceptions that don't trap; the mask
   doesn't apply.  */
void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
{
    if (exc) {
        uint32_t hw_exc = 0;

        /* Translate softfloat float_flag_* bits into the architected
           exception summary bits.  */
        if (exc & float_flag_invalid) {
            hw_exc |= EXC_M_INV;
        }
        if (exc & float_flag_divbyzero) {
            hw_exc |= EXC_M_DZE;
        }
        if (exc & float_flag_overflow) {
            hw_exc |= EXC_M_FOV;
        }
        if (exc & float_flag_underflow) {
            hw_exc |= EXC_M_UNF;
        }
        if (exc & float_flag_inexact) {
            hw_exc |= EXC_M_INE;
        }

        /* The mask identifies the destination register of the insn.  */
        arith_excp(hw_exc, 1ull << regno);
    }
}

/* Raise exceptions for ieee fp insns with software completion.  */
void helper_fp_exc_raise_s(uint32_t exc, uint32_t regno)
{
    if (exc) {
        /* Always accumulate status; trap only on bits not masked by FPCR.  */
        env->fpcr_exc_status |= exc;

        exc &= ~env->fpcr_exc_mask;
        if (exc) {
            helper_fp_exc_raise(exc, regno);
        }
    }
}
/* Input remapping without software completion.  Handle denormal-map-to-zero
   and trap for all other non-finite numbers.  */
uint64_t helper_ieee_input(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;      /* keep only the sign bit */
            } else {
                arith_excp(EXC_M_UNF, 0);
            }
        }
    } else if (exp == 0x7ff) {
        /* Infinity or NaN.  */
        /* ??? I'm not sure these exception bit flags are correct.  I do
           know that the Linux kernel, at least, doesn't rely on them and
           just emulates the insn to figure out what exception to use.  */
        arith_excp(frac ? EXC_M_INV : EXC_M_FOV, 0);
    }
    return val;
}

/* Similar, but does not trap for infinities.  Used for comparisons.  */
uint64_t helper_ieee_input_cmp(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;
            } else {
                arith_excp(EXC_M_UNF, 0);
            }
        }
    } else if (exp == 0x7ff && frac) {
        /* NaN.  */
        arith_excp(EXC_M_INV, 0);
    }
    return val;
}

/* Input remapping with software completion enabled.  All we have to do
   is handle denormal-map-to-zero; all other inputs get exceptions as
   needed from the actual operation.  */
uint64_t helper_ieee_input_s(uint64_t val)
{
    if (env->fpcr_dnz) {
        uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
        if (exp == 0) {
            val &= 1ull << 63;          /* flush to signed zero */
        }
    }
    return val;
}
502 /* F floating (VAX) */
503 static inline uint64_t float32_to_f(float32 fa)
505 uint64_t r, exp, mant, sig;
506 CPU_FloatU a;
508 a.f = fa;
509 sig = ((uint64_t)a.l & 0x80000000) << 32;
510 exp = (a.l >> 23) & 0xff;
511 mant = ((uint64_t)a.l & 0x007fffff) << 29;
513 if (exp == 255) {
514 /* NaN or infinity */
515 r = 1; /* VAX dirty zero */
516 } else if (exp == 0) {
517 if (mant == 0) {
518 /* Zero */
519 r = 0;
520 } else {
521 /* Denormalized */
522 r = sig | ((exp + 1) << 52) | mant;
524 } else {
525 if (exp >= 253) {
526 /* Overflow */
527 r = 1; /* VAX dirty zero */
528 } else {
529 r = sig | ((exp + 2) << 52);
533 return r;
/* Convert the register image of a VAX F float back to an IEEE single.
   Raises OPCDEC for VAX reserved operands / dirty zeros.  */
static inline float32 f_to_float32(uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        /* Undo the VAX excess-2 exponent bias adjustment.  */
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}
/* STF: pack a VAX F register image into its word-swapped 32-bit memory
   form — sign/exponent into bits 0-15, fraction low bits into 16-31.  */
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;

    r  = (uint32_t)((a >> 13) & 0xffff0000);
    r |= (uint32_t)((a >> 45) & 0x00003fff);
    r |= (uint32_t)((a >> 48) & 0x0000c000);
    return r;
}
/* LDF: expand the word-swapped 32-bit VAX F memory form into the 64-bit
   register image, widening the exponent field.  */
uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r = (uint64_t)(a & 0xffff0000) << 13;  /* fraction low */
    r |= (uint64_t)(a & 0x003fffff) << 45;          /* exp low + fraction hi */
    r |= (uint64_t)(a & 0x0000c000) << 48;          /* sign + exp MSB */
    if ((a & 0x00004000) == 0) {
        /* Exponent MSB clear: fill the widened exponent bits with ones.  */
        r |= 0x7ull << 59;
    }
    return r;
}
/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong.  We should
   either implement VAX arithmetic properly or just signal invalid opcode.  */

/* ADDF: VAX F add, performed in IEEE single arithmetic.  */
uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* SUBF: VAX F subtract.  */
uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* MULF: VAX F multiply.  */
uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* DIVF: VAX F divide.  */
uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

/* SQRTF: VAX F square root.  */
uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}
632 /* G floating (VAX) */
633 static inline uint64_t float64_to_g(float64 fa)
635 uint64_t r, exp, mant, sig;
636 CPU_DoubleU a;
638 a.d = fa;
639 sig = a.ll & 0x8000000000000000ull;
640 exp = (a.ll >> 52) & 0x7ff;
641 mant = a.ll & 0x000fffffffffffffull;
643 if (exp == 2047) {
644 /* NaN or infinity */
645 r = 1; /* VAX dirty zero */
646 } else if (exp == 0) {
647 if (mant == 0) {
648 /* Zero */
649 r = 0;
650 } else {
651 /* Denormalized */
652 r = sig | ((exp + 1) << 52) | mant;
654 } else {
655 if (exp >= 2045) {
656 /* Overflow */
657 r = 1; /* VAX dirty zero */
658 } else {
659 r = sig | ((exp + 2) << 52);
663 return r;
/* Convert the register image of a VAX G float back to an IEEE double.
   Raises OPCDEC for VAX reserved operands / dirty zeros.  */
static inline float64 g_to_float64(uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        /* Undo the VAX excess-2 exponent bias adjustment.  */
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}
/* STG: VAX G memory layout reverses the order of the four 16-bit words.  */
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 4; i++) {
        r |= ((a >> (i * 16)) & 0xffffull) << ((3 - i) * 16);
    }
    return r;
}
/* LDG: inverse of STG; the word swap is its own inverse.  */
uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 4; i++) {
        r |= ((a >> (i * 16)) & 0xffffull) << ((3 - i) * 16);
    }
    return r;
}
/* ADDG: VAX G add, performed in IEEE double arithmetic.  */
uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* SUBG: VAX G subtract.  */
uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* MULG: VAX G multiply.  */
uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* DIVG: VAX G divide.  */
uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

/* SQRTG: VAX G square root.  */
uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}
/* S floating (single) */

/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg: widen IEEE
   single bits to the Alpha 64-bit S register image by remapping the
   8-bit exponent onto 11 bits and left-aligning the fraction.  */
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint32_t sign = fi >> 31;
    uint32_t exp_msb = (fi >> 30) & 1;
    uint32_t exp_low = (fi >> 23) & 0x7f;
    uint32_t frac = fi & 0x7fffff;
    uint32_t exp = (exp_msb << 10) | exp_low;

    if (exp_msb) {
        if (exp_low == 0x7f) {
            exp = 0x7ff;            /* Inf/NaN: saturate the exponent.  */
        }
    } else if (exp_low != 0) {
        exp |= 0x380;               /* Rebias normal numbers.  */
    }

    return ((uint64_t)sign << 63)
         | ((uint64_t)exp << 52)
         | ((uint64_t)frac << 29);
}
/* Convert an IEEE single to the Alpha S register image.  */
static inline uint64_t float32_to_s(float32 fa)
{
    CPU_FloatU a;
    a.f = fa;
    return float32_to_s_int(a.l);
}
/* Narrow the 64-bit S register image back to IEEE single bits: keep the
   sign + exponent-MSB pair and the low 30 remapped bits.  */
static inline uint32_t s_to_float32_int(uint64_t a)
{
    uint32_t hi = (uint32_t)(a >> 32) & 0xc0000000;
    uint32_t lo = (uint32_t)(a >> 29) & 0x3fffffff;
    return hi | lo;
}
/* Convert an S register image back to an IEEE single.  */
static inline float32 s_to_float32(uint64_t a)
{
    CPU_FloatU r;
    r.l = s_to_float32_int(a);
    return r.f;
}

/* STS: narrow the S register image to its 32-bit memory format.  */
uint32_t helper_s_to_memory (uint64_t a)
{
    return s_to_float32_int(a);
}

/* LDS: widen the 32-bit memory format to the S register image.  */
uint64_t helper_memory_to_s (uint32_t a)
{
    return float32_to_s_int(a);
}
/* ADDS: IEEE single add on S register images.  */
uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* SUBS: IEEE single subtract.  */
uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* MULS: IEEE single multiply.  */
uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* DIVS: IEEE single divide.  */
uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

/* SQRTS: IEEE single square root.  */
uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}
/* T floating (double) */

/* T register image -> IEEE double (bit-identical reinterpretation).  */
static inline float64 t_to_float64(uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

/* IEEE double -> T register image (bit-identical reinterpretation).  */
static inline uint64_t float64_to_t(float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}
/* ADDT: IEEE double add.  */
uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* SUBT: IEEE double subtract.  */
uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* MULT: IEEE double multiply.  */
uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* DIVT: IEEE double divide.  */
uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

/* SQRTT: IEEE double square root.  */
uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}
/* Comparisons */

/* CMPTUN: 2.0 if the operands are unordered (either is a NaN), else 0.  */
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_unordered_quiet(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

/* CMPTEQ: 2.0 if A == B, else 0 (quiet compare).  */
uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq_quiet(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPTLE: 2.0 if A <= B, else 0 (signaling compare).  */
uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPTLT: 2.0 if A < B, else 0 (signaling compare).  */
uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}
/* CMPGEQ: VAX G compare equal; 2.0 if A == B, else 0.  */
uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq_quiet(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPGLE: VAX G compare; 2.0 if A <= B, else 0.  */
uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* CMPGLT: VAX G compare; 2.0 if A < B, else 0.  */
uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}
/* Floating point format conversion */

/* CVTTS: T (double) to S (single).  */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

/* CVTST: S (single) to T (double).  */
uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

/* CVTQS: quadword integer to S.  */
uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}
/* Implement float64 to uint64 conversion without saturation -- we must
   supply the truncated result.  This behaviour is used by the compiler
   to get unsigned conversion for free with the same instruction.

   The VI flag is set when overflow or inexact exceptions should be raised.  */

static inline uint64_t helper_cvttq_internal(uint64_t a, int roundmode, int VI)
{
    uint64_t frac, ret = 0;
    uint32_t exp, sign, exc = 0;
    int shift;

    /* Decompose the IEEE double by hand so we can truncate rather than
       saturate on overflow.  */
    sign = (a >> 63);
    exp = (uint32_t)(a >> 52) & 0x7ff;
    frac = a & 0xfffffffffffffull;

    if (exp == 0) {
        if (unlikely(frac != 0)) {
            /* Denormal: treat as an all-sticky underflow below.  */
            goto do_underflow;
        }
    } else if (exp == 0x7ff) {
        /* Inf/NaN: result stays 0; flag invalid for NaN, overflow for Inf.  */
        exc = (frac ? float_flag_invalid : VI ? float_flag_overflow : 0);
    } else {
        /* Restore implicit bit.  */
        frac |= 0x10000000000000ull;

        shift = exp - 1023 - 52;
        if (shift >= 0) {
            /* In this case the number is so large that we must shift
               the fraction left.  There is no rounding to do.  */
            if (shift < 63) {
                ret = frac << shift;
                /* Overflow if bits were shifted out the top.  */
                if (VI && (ret >> shift) != frac) {
                    exc = float_flag_overflow;
                }
            }
        } else {
            uint64_t round;

            /* In this case the number is smaller than the fraction as
               represented by the 52 bit number.  Here we must think
               about rounding the result.  Handle this by shifting the
               fractional part of the number into the high bits of ROUND.
               This will let us efficiently handle round-to-nearest.  */
            shift = -shift;
            if (shift < 63) {
                ret = frac >> shift;
                round = frac << (64 - shift);
            } else {
                /* The exponent is so small we shift out everything.
                   Leave a sticky bit for proper rounding below.  */
            do_underflow:
                round = 1;
            }

            if (round) {
                exc = (VI ? float_flag_inexact : 0);
                switch (roundmode) {
                case float_round_nearest_even:
                    if (round == (1ull << 63)) {
                        /* Fraction is exactly 0.5; round to even.  */
                        ret += (ret & 1);
                    } else if (round > (1ull << 63)) {
                        ret += 1;
                    }
                    break;
                case float_round_to_zero:
                    break;
                case float_round_up:
                    ret += 1 - sign;
                    break;
                case float_round_down:
                    ret += sign;
                    break;
                }
            }
        }
    }

    /* Apply the sign; wrap-around (not saturation) is intentional.  */
    if (sign) {
        ret = -ret;
    }

    if (unlikely(exc)) {
        float_raise(exc, &FP_STATUS);
    }

    return ret;
}
/* CVTTQ: T to quadword with the current dynamic rounding mode,
   overflow/inexact enabled.  */
uint64_t helper_cvttq(uint64_t a)
{
    return helper_cvttq_internal(a, FP_STATUS.float_rounding_mode, 1);
}

/* CVTTQ/C: chopped (round toward zero), overflow/inexact suppressed.  */
uint64_t helper_cvttq_c(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 0);
}

/* CVTTQ/SVIC: chopped, but with overflow/inexact raised.  */
uint64_t helper_cvttq_svic(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 1);
}
/* CVTQT: quadword integer to T (double).  */
uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

/* CVTQF: quadword integer to VAX F.  */
uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

/* CVTGF: VAX G to VAX F, via IEEE double->single.  */
uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

/* CVTGQ: VAX G to quadword integer, truncating.  */
uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

/* CVTQG: quadword integer to VAX G.  */
uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}
/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
/* HW_RET: return from PALcode.  Bit 0 of the target address selects
   PAL mode; the interrupt flag and the load-locked lock are reset.  */
void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->pal_mode = a & 1;
    env->intr_flag = 0;
    env->lock_addr = -1;
}
#endif
/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)
/* LDL from a physical address, sign-extended to 64 bits.  */
uint64_t helper_ldl_phys(uint64_t p)
{
    return (int32_t)ldl_phys(p);
}

/* LDQ from a physical address.  */
uint64_t helper_ldq_phys(uint64_t p)
{
    return ldq_phys(p);
}

/* LDL_L: load-locked longword; record address and value for STL_C.  */
uint64_t helper_ldl_l_phys(uint64_t p)
{
    env->lock_addr = p;
    return env->lock_value = (int32_t)ldl_phys(p);
}
1217 uint64_t helper_ldq_l_phys(uint64_t p)
1219 env->lock_addr = p;
1220 return env->lock_value = ldl_phys(p);
/* STL to a physical address.  */
void helper_stl_phys(uint64_t p, uint64_t v)
{
    stl_phys(p, v);
}

/* STQ to a physical address.  */
void helper_stq_phys(uint64_t p, uint64_t v)
{
    stq_phys(p, v);
}
/* STL_C: store-conditional longword.  Returns 1 and performs the store
   only if the lock is still held on P and memory still holds the locked
   value; the lock is always cleared.  */
uint64_t helper_stl_c_phys(uint64_t p, uint64_t v)
{
    uint64_t ret = 0;

    if (p == env->lock_addr) {
        int32_t old = ldl_phys(p);
        if (old == (int32_t)env->lock_value) {
            stl_phys(p, v);
            ret = 1;
        }
    }
    env->lock_addr = -1;

    return ret;
}
/* STQ_C: store-conditional quadword; same protocol as STL_C.  */
uint64_t helper_stq_c_phys(uint64_t p, uint64_t v)
{
    uint64_t ret = 0;

    if (p == env->lock_addr) {
        uint64_t old = ldq_phys(p);
        if (old == env->lock_value) {
            stq_phys(p, v);
            ret = 1;
        }
    }
    env->lock_addr = -1;

    return ret;
}
1265 #define MMUSUFFIX _mmu
1267 #define SHIFT 0
1268 #include "softmmu_template.h"
1270 #define SHIFT 1
1271 #include "softmmu_template.h"
1273 #define SHIFT 2
1274 #include "softmmu_template.h"
1276 #define SHIFT 3
1277 #include "softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    CPUState *saved_env;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        /* Fault: restore precise guest state before unwinding.  */
        do_restore_state(retaddr);

        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}
1301 #endif