configure: Improve Xen autodetection for hosts without Xen
[qemu/ar7.git] / target-alpha / op_helper.c
blobcc102dbd633bd21c4614cc52d6f4cbbfef8400d6
1 /*
2 * Alpha emulation cpu micro-operations helpers for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "cpu.h"
21 #include "dyngen-exec.h"
22 #include "host-utils.h"
23 #include "softfloat.h"
24 #include "helper.h"
25 #include "sysemu.h"
26 #include "qemu-timer.h"
28 #define FP_STATUS (env->fp_status)
30 /*****************************************************************************/
31 /* Exceptions processing helpers */
33 /* This should only be called from translate, via gen_excp.
34 We expect that ENV->PC has already been updated. */
/* Raise guest exception EXCP with ERROR; longjmps out, never returns. */
35 void QEMU_NORETURN helper_excp(int excp, int error)
37 env->exception_index = excp;
38 env->error_code = error;
39 cpu_loop_exit(env);
/* Roll back guest CPU state from the host return address RETADDR of a
   helper call, so the exception is attributed to the right guest insn.
   A NULL retaddr means we were called from plain C code: nothing to do. */
42 static void do_restore_state(void *retaddr)
44 unsigned long pc = (unsigned long)retaddr;
46 if (pc) {
47 TranslationBlock *tb = tb_find_pc(pc);
48 if (tb) {
49 cpu_restore_state(tb, env, pc);
54 /* This may be called from any of the helpers to set up EXCEPTION_INDEX. */
/* Raise EXCP from within a helper; GETPC() supplies the helper's return
   address so guest state can be restored before delivery. */
55 static void QEMU_NORETURN dynamic_excp(int excp, int error)
57 env->exception_index = excp;
58 env->error_code = error;
59 do_restore_state(GETPC());
60 cpu_loop_exit(env);
/* Raise an arithmetic trap: EXC is the trap summary, MASK the register
   write mask; both are handed to the OS via trap_arg0/1. */
63 static void QEMU_NORETURN arith_excp(int exc, uint64_t mask)
65 env->trap_arg0 = exc;
66 env->trap_arg1 = mask;
67 dynamic_excp(EXCP_ARITH, 0);
/* Read the RPCC process cycle counter: PCC_OFS in the high 32 bits,
   a 250MHz-scaled cycle count in the low 32 bits. */
70 uint64_t helper_load_pcc (void)
72 #ifndef CONFIG_USER_ONLY
73 /* In system mode we have access to a decent high-resolution clock.
74 In order to make OS-level time accounting work with the RPCC,
75 present it with a well-timed clock fixed at 250MHz. */
76 return (((uint64_t)env->pcc_ofs << 32)
77 | (uint32_t)(qemu_get_clock_ns(vm_clock) >> 2));
78 #else
79 /* In user-mode, vm_clock doesn't exist. Just pass through the host cpu
80 clock ticks. Also, don't bother taking PCC_OFS into account. */
81 return (uint32_t)cpu_get_real_ticks();
82 #endif
/* Read the FPCR in its architectural register layout. */
85 uint64_t helper_load_fpcr (void)
87 return cpu_alpha_load_fpcr (env);
/* Write the FPCR; decoding of the individual fields is done in helper.c. */
90 void helper_store_fpcr (uint64_t val)
92 cpu_alpha_store_fpcr (env, val);
95 uint64_t helper_addqv (uint64_t op1, uint64_t op2)
97 uint64_t tmp = op1;
98 op1 += op2;
99 if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
100 arith_excp(EXC_M_IOV, 0);
102 return op1;
105 uint64_t helper_addlv (uint64_t op1, uint64_t op2)
107 uint64_t tmp = op1;
108 op1 = (uint32_t)(op1 + op2);
109 if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
110 arith_excp(EXC_M_IOV, 0);
112 return op1;
115 uint64_t helper_subqv (uint64_t op1, uint64_t op2)
117 uint64_t res;
118 res = op1 - op2;
119 if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
120 arith_excp(EXC_M_IOV, 0);
122 return res;
125 uint64_t helper_sublv (uint64_t op1, uint64_t op2)
127 uint32_t res;
128 res = op1 - op2;
129 if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
130 arith_excp(EXC_M_IOV, 0);
132 return res;
135 uint64_t helper_mullv (uint64_t op1, uint64_t op2)
137 int64_t res = (int64_t)op1 * (int64_t)op2;
139 if (unlikely((int32_t)res != res)) {
140 arith_excp(EXC_M_IOV, 0);
142 return (int64_t)((int32_t)res);
/* MULQ/V: quadword multiply, trapping on signed overflow. */
145 uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
147 uint64_t tl, th;
149 muls64(&tl, &th, op1, op2);
150 /* If th != 0 && th != -1, then we had an overflow */
/* (th + 1) > 1 is an unsigned trick testing exactly th != 0 && th != -1. */
151 if (unlikely((th + 1) > 1)) {
152 arith_excp(EXC_M_IOV, 0);
154 return tl;
/* UMULH: high 64 bits of the unsigned 128-bit product. */
157 uint64_t helper_umulh (uint64_t op1, uint64_t op2)
159 uint64_t tl, th;
161 mulu64(&tl, &th, op1, op2);
162 return th;
/* CTPOP: population count of ARG. */
165 uint64_t helper_ctpop (uint64_t arg)
167 return ctpop64(arg);
/* CTLZ: count leading zeros of ARG. */
170 uint64_t helper_ctlz (uint64_t arg)
172 return clz64(arg);
/* CTTZ: count trailing zeros of ARG. */
175 uint64_t helper_cttz (uint64_t arg)
177 return ctz64(arg);
/* Clear each byte i of OP whose corresponding bit i is set in MSKB. */
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask = 0;
    int i;

    /* Expand each set bit of the byte mask into a full byte of ones.  */
    for (i = 0; i < 8; i++) {
        if (mskb & (1u << i)) {
            mask |= 0xffull << (i * 8);
        }
    }
    return op & ~mask;
}
/* ZAP: clear the bytes of VAL selected by the low 8 bits of MASK. */
197 uint64_t helper_zap(uint64_t val, uint64_t mask)
199 return byte_zap(val, mask);
/* ZAPNOT: clear the bytes of VAL NOT selected by MASK. */
202 uint64_t helper_zapnot(uint64_t val, uint64_t mask)
204 return byte_zap(val, ~mask);
/* CMPBGE: compare the eight bytes of OP1 and OP2 as unsigned values;
   bit i of the result is set when byte i of OP1 >= byte i of OP2. */
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int i;

    /* Build the result msb-first so each byte's flag lands on bit i.  */
    for (i = 7; i >= 0; i--) {
        uint8_t a = op1 >> (i * 8);
        uint8_t b = op2 >> (i * 8);
        res = (res << 1) | (a >= b);
    }
    return res;
}
/* MINUB8: byte-wise unsigned minimum of the eight bytes of OP1/OP2. */
uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        uint8_t a = op1 >> shift;
        uint8_t b = op2 >> shift;
        res |= (uint64_t)(b < a ? b : a) << shift;
    }
    return res;
}
/* MINSB8: byte-wise signed minimum of the eight bytes of OP1/OP2. */
uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int8_t a = op1 >> shift;
        int8_t b = op2 >> shift;
        /* Reduce back to an unsigned byte before widening.  */
        res |= (uint64_t)(uint8_t)(b < a ? b : a) << shift;
    }
    return res;
}
/* MINUW4: word-wise unsigned minimum of the four 16-bit lanes. */
uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        uint16_t a = op1 >> shift;
        uint16_t b = op2 >> shift;
        res |= (uint64_t)(b < a ? b : a) << shift;
    }
    return res;
}
/* MINSW4: word-wise signed minimum of the four 16-bit lanes. */
uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        int16_t a = op1 >> shift;
        int16_t b = op2 >> shift;
        /* Reduce back to an unsigned word before widening.  */
        res |= (uint64_t)(uint16_t)(b < a ? b : a) << shift;
    }
    return res;
}
/* MAXUB8: byte-wise unsigned maximum of the eight bytes of OP1/OP2. */
uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        uint8_t a = op1 >> shift;
        uint8_t b = op2 >> shift;
        res |= (uint64_t)(b > a ? b : a) << shift;
    }
    return res;
}
/* MAXSB8: byte-wise signed maximum of the eight bytes of OP1/OP2. */
uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int8_t a = op1 >> shift;
        int8_t b = op2 >> shift;
        res |= (uint64_t)(uint8_t)(b > a ? b : a) << shift;
    }
    return res;
}
/* MAXUW4: word-wise unsigned maximum of the four 16-bit lanes. */
uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        uint16_t a = op1 >> shift;
        uint16_t b = op2 >> shift;
        res |= (uint64_t)(b > a ? b : a) << shift;
    }
    return res;
}
/* MAXSW4: word-wise signed maximum of the four 16-bit lanes. */
uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 16) {
        int16_t a = op1 >> shift;
        int16_t b = op2 >> shift;
        res |= (uint64_t)(uint16_t)(b > a ? b : a) << shift;
    }
    return res;
}
/* PERR: sum of absolute differences of the eight unsigned bytes. */
uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t sum = 0;
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int a = (uint8_t)(op1 >> shift);
        int b = (uint8_t)(op2 >> shift);
        sum += (a > b) ? (a - b) : (b - a);
    }
    return sum;
}
/* PKLB: pack the low bytes of the two 32-bit longwords into bytes 0-1. */
uint64_t helper_pklb (uint64_t op1)
{
    uint64_t lo = op1 & 0xff;
    uint64_t hi = (op1 >> 32) & 0xff;
    return lo | (hi << 8);
}
/* PKWB: pack the low byte of each 16-bit word into bytes 0-3. */
uint64_t helper_pkwb (uint64_t op1)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 4; i++) {
        r |= ((op1 >> (i * 16)) & 0xff) << (i * 8);
    }
    return r;
}
/* UNPKBL: spread bytes 0-1 into the low bytes of the two longwords. */
uint64_t helper_unpkbl (uint64_t op1)
{
    uint64_t b0 = op1 & 0xff;
    uint64_t b1 = (op1 >> 8) & 0xff;
    return b0 | (b1 << 32);
}
/* UNPKBW: spread bytes 0-3 into the low bytes of the four words. */
uint64_t helper_unpkbw (uint64_t op1)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 4; i++) {
        r |= ((op1 >> (i * 8)) & 0xff) << (i * 16);
    }
    return r;
}
390 /* Floating point helpers */
/* Set the softfloat rounding mode (VAL is a float_round_* constant). */
392 void helper_setroundmode (uint32_t val)
394 set_float_rounding_mode(val, &FP_STATUS);
/* Enable/disable flush-to-zero of results in the softfloat status. */
397 void helper_setflushzero (uint32_t val)
399 set_flush_to_zero(val, &FP_STATUS);
/* Clear the accumulated softfloat exception flags. */
402 void helper_fp_exc_clear (void)
404 set_float_exception_flags(0, &FP_STATUS);
/* Read the accumulated softfloat exception flags (float_flag_* bits). */
407 uint32_t helper_fp_exc_get (void)
409 return get_float_exception_flags(&FP_STATUS);
412 /* Raise exceptions for ieee fp insns without software completion.
413 In that case there are no exceptions that don't trap; the mask
414 doesn't apply. */
/* EXC is a set of softfloat float_flag_* bits; REGNO the destination
   register, reported to the OS in the register write mask. */
415 void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
417 if (exc) {
418 uint32_t hw_exc = 0;
/* Translate each softfloat flag into the architectural EXC_M_* bit. */
420 if (exc & float_flag_invalid) {
421 hw_exc |= EXC_M_INV;
423 if (exc & float_flag_divbyzero) {
424 hw_exc |= EXC_M_DZE;
426 if (exc & float_flag_overflow) {
427 hw_exc |= EXC_M_FOV;
429 if (exc & float_flag_underflow) {
430 hw_exc |= EXC_M_UNF;
432 if (exc & float_flag_inexact) {
433 hw_exc |= EXC_M_INE;
436 arith_excp(hw_exc, 1ull << regno);
440 /* Raise exceptions for ieee fp insns with software completion. */
/* Always record the flags in FPCR status; only trap on those not
   masked off by fpcr_exc_mask. */
441 void helper_fp_exc_raise_s(uint32_t exc, uint32_t regno)
443 if (exc) {
444 env->fpcr_exc_status |= exc;
446 exc &= ~env->fpcr_exc_mask;
447 if (exc) {
448 helper_fp_exc_raise(exc, regno);
453 /* Input remapping without software completion. Handle denormal-map-to-zero
454 and trap for all other non-finite numbers. */
/* VAL is an IEEE double in register format; returns it possibly flushed
   to signed zero, or traps. */
455 uint64_t helper_ieee_input(uint64_t val)
457 uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
458 uint64_t frac = val & 0xfffffffffffffull;
460 if (exp == 0) {
461 if (frac != 0) {
462 /* If DNZ is set flush denormals to zero on input. */
463 if (env->fpcr_dnz) {
/* Keep only the sign bit: signed zero. */
464 val &= 1ull << 63;
465 } else {
466 arith_excp(EXC_M_UNF, 0);
469 } else if (exp == 0x7ff) {
470 /* Infinity or NaN. */
471 /* ??? I'm not sure these exception bit flags are correct. I do
472 know that the Linux kernel, at least, doesn't rely on them and
473 just emulates the insn to figure out what exception to use. */
474 arith_excp(frac ? EXC_M_INV : EXC_M_FOV, 0);
476 return val;
479 /* Similar, but does not trap for infinities. Used for comparisons. */
480 uint64_t helper_ieee_input_cmp(uint64_t val)
482 uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
483 uint64_t frac = val & 0xfffffffffffffull;
485 if (exp == 0) {
486 if (frac != 0) {
487 /* If DNZ is set flush denormals to zero on input. */
488 if (env->fpcr_dnz) {
489 val &= 1ull << 63;
490 } else {
491 arith_excp(EXC_M_UNF, 0);
494 } else if (exp == 0x7ff && frac) {
495 /* NaN. */
496 arith_excp(EXC_M_INV, 0);
498 return val;
501 /* Input remapping with software completion enabled. All we have to do
502 is handle denormal-map-to-zero; all other inputs get exceptions as
503 needed from the actual operation. */
504 uint64_t helper_ieee_input_s(uint64_t val)
506 if (env->fpcr_dnz) {
507 uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
508 if (exp == 0) {
509 val &= 1ull << 63;
512 return val;
515 /* F floating (VAX) */
/* Convert an IEEE single to the 64-bit register image of a VAX F float.
   Non-representable inputs (NaN/Inf/overflow) become a VAX dirty zero. */
516 static inline uint64_t float32_to_f(float32 fa)
518 uint64_t r, exp, mant, sig;
519 CPU_FloatU a;
521 a.f = fa;
522 sig = ((uint64_t)a.l & 0x80000000) << 32;
523 exp = (a.l >> 23) & 0xff;
524 mant = ((uint64_t)a.l & 0x007fffff) << 29;
526 if (exp == 255) {
527 /* NaN or infinity */
528 r = 1; /* VAX dirty zero */
529 } else if (exp == 0) {
530 if (mant == 0) {
531 /* Zero */
532 r = 0;
533 } else {
534 /* Denormalized */
535 r = sig | ((exp + 1) << 52) | mant;
537 } else {
538 if (exp >= 253) {
539 /* Overflow */
540 r = 1; /* VAX dirty zero */
541 } else {
/* +2 rebiases the IEEE single exponent into the F-float register form. */
542 r = sig | ((exp + 2) << 52);
546 return r;
/* Convert the 64-bit register image of a VAX F float back to an IEEE
   single; traps OPCDEC on reserved operands / dirty zeros. */
549 static inline float32 f_to_float32(uint64_t a)
551 uint32_t exp, mant_sig;
552 CPU_FloatU r;
554 exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
555 mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);
557 if (unlikely(!exp && mant_sig)) {
558 /* Reserved operands / Dirty zero */
559 dynamic_excp(EXCP_OPCDEC, 0);
562 if (exp < 3) {
563 /* Underflow */
564 r.l = 0;
565 } else {
566 r.l = ((exp - 2) << 23) | mant_sig;
569 return r.f;
/* Convert a VAX F float from register image to its 32-bit memory
   layout (sign/exponent and fraction fields are word-swapped). */
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t frac_lo  = (uint32_t)(a >> 13) & 0xffff0000;
    uint32_t exp_frac = (uint32_t)(a >> 45) & 0x00003fff;
    uint32_t sign_exp = (uint32_t)(a >> 48) & 0x0000c000;
    return sign_exp | exp_frac | frac_lo;
}
/* Convert a VAX F float from its 32-bit memory layout to the 64-bit
   register image (inverse of helper_f_to_memory). */
uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r = ((uint64_t)(a & 0xffff0000)) << 13;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0x0000c000)) << 48;
    if ((a & 0x00004000) == 0) {
        /* Exponent msb clear: replicate it into register bits 59..61. */
        r |= 0x7ull << 59;
    }
    return r;
}
592 /* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong. We should
593 either implement VAX arithmetic properly or just signal invalid opcode. */
/* ADDF: VAX F add, implemented via IEEE single arithmetic. */
595 uint64_t helper_addf (uint64_t a, uint64_t b)
597 float32 fa, fb, fr;
599 fa = f_to_float32(a);
600 fb = f_to_float32(b);
601 fr = float32_add(fa, fb, &FP_STATUS);
602 return float32_to_f(fr);
/* SUBF: VAX F subtract. */
605 uint64_t helper_subf (uint64_t a, uint64_t b)
607 float32 fa, fb, fr;
609 fa = f_to_float32(a);
610 fb = f_to_float32(b);
611 fr = float32_sub(fa, fb, &FP_STATUS);
612 return float32_to_f(fr);
/* MULF: VAX F multiply. */
615 uint64_t helper_mulf (uint64_t a, uint64_t b)
617 float32 fa, fb, fr;
619 fa = f_to_float32(a);
620 fb = f_to_float32(b);
621 fr = float32_mul(fa, fb, &FP_STATUS);
622 return float32_to_f(fr);
/* DIVF: VAX F divide. */
625 uint64_t helper_divf (uint64_t a, uint64_t b)
627 float32 fa, fb, fr;
629 fa = f_to_float32(a);
630 fb = f_to_float32(b);
631 fr = float32_div(fa, fb, &FP_STATUS);
632 return float32_to_f(fr);
/* SQRTF: VAX F square root. */
635 uint64_t helper_sqrtf (uint64_t t)
637 float32 ft, fr;
639 ft = f_to_float32(t);
640 fr = float32_sqrt(ft, &FP_STATUS);
641 return float32_to_f(fr);
645 /* G floating (VAX) */
/* Convert an IEEE double to the 64-bit register image of a VAX G float.
   Non-representable inputs (NaN/Inf/overflow) become a VAX dirty zero. */
646 static inline uint64_t float64_to_g(float64 fa)
648 uint64_t r, exp, mant, sig;
649 CPU_DoubleU a;
651 a.d = fa;
652 sig = a.ll & 0x8000000000000000ull;
653 exp = (a.ll >> 52) & 0x7ff;
654 mant = a.ll & 0x000fffffffffffffull;
656 if (exp == 2047) {
657 /* NaN or infinity */
658 r = 1; /* VAX dirty zero */
659 } else if (exp == 0) {
660 if (mant == 0) {
661 /* Zero */
662 r = 0;
663 } else {
664 /* Denormalized */
665 r = sig | ((exp + 1) << 52) | mant;
667 } else {
668 if (exp >= 2045) {
669 /* Overflow */
670 r = 1; /* VAX dirty zero */
671 } else {
/* +2 rebiases the IEEE double exponent into G-float register form. */
672 r = sig | ((exp + 2) << 52);
676 return r;
/* Convert the register image of a VAX G float back to an IEEE double;
   traps OPCDEC on reserved operands / dirty zeros. */
679 static inline float64 g_to_float64(uint64_t a)
681 uint64_t exp, mant_sig;
682 CPU_DoubleU r;
684 exp = (a >> 52) & 0x7ff;
685 mant_sig = a & 0x800fffffffffffffull;
687 if (!exp && mant_sig) {
688 /* Reserved operands / Dirty zero */
689 dynamic_excp(EXCP_OPCDEC, 0);
692 if (exp < 3) {
693 /* Underflow */
694 r.ll = 0;
695 } else {
696 r.ll = ((exp - 2) << 52) | mant_sig;
699 return r.d;
/* Convert a VAX G float from register image to its memory layout by
   reversing the order of the four 16-bit words. */
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 4; i++) {
        r |= ((a >> (i * 16)) & 0xffffull) << ((3 - i) * 16);
    }
    return r;
}
/* Convert a VAX G float from memory layout back to register image.
   The 16-bit word swap is an involution, so this mirrors
   helper_g_to_memory exactly. */
uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t w0 = a & 0xffffull;
    uint64_t w1 = (a >> 16) & 0xffffull;
    uint64_t w2 = (a >> 32) & 0xffffull;
    uint64_t w3 = (a >> 48) & 0xffffull;
    return (w0 << 48) | (w1 << 32) | (w2 << 16) | w3;
}
/* ADDG: VAX G add, implemented via IEEE double arithmetic. */
722 uint64_t helper_addg (uint64_t a, uint64_t b)
724 float64 fa, fb, fr;
726 fa = g_to_float64(a);
727 fb = g_to_float64(b);
728 fr = float64_add(fa, fb, &FP_STATUS);
729 return float64_to_g(fr);
/* SUBG: VAX G subtract. */
732 uint64_t helper_subg (uint64_t a, uint64_t b)
734 float64 fa, fb, fr;
736 fa = g_to_float64(a);
737 fb = g_to_float64(b);
738 fr = float64_sub(fa, fb, &FP_STATUS);
739 return float64_to_g(fr);
/* MULG: VAX G multiply. */
742 uint64_t helper_mulg (uint64_t a, uint64_t b)
744 float64 fa, fb, fr;
746 fa = g_to_float64(a);
747 fb = g_to_float64(b);
748 fr = float64_mul(fa, fb, &FP_STATUS);
749 return float64_to_g(fr);
/* DIVG: VAX G divide. */
752 uint64_t helper_divg (uint64_t a, uint64_t b)
754 float64 fa, fb, fr;
756 fa = g_to_float64(a);
757 fb = g_to_float64(b);
758 fr = float64_div(fa, fb, &FP_STATUS);
759 return float64_to_g(fr);
/* SQRTG: VAX G square root. */
762 uint64_t helper_sqrtg (uint64_t a)
764 float64 fa, fr;
766 fa = g_to_float64(a);
767 fr = float64_sqrt(fa, &FP_STATUS);
768 return float64_to_g(fr);
772 /* S floating (single) */
774 /* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg. */
/* Expand an IEEE single (raw 32-bit image FI) into the 64-bit register
   image Alpha uses for S floats: sign and fraction are repositioned and
   the 8-bit exponent is widened to 11 bits with the msb replicated. */
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint64_t sign = (uint64_t)(fi >> 31) << 63;
    uint64_t frac = (uint64_t)(fi & 0x7fffff) << 29;
    uint32_t exp8 = (fi >> 23) & 0xff;
    uint32_t exp;

    if (exp8 & 0x80) {
        /* msb set: all-ones maps to the double all-ones exponent,
           otherwise just reposition the msb. */
        exp = (exp8 == 0xff) ? 0x7ff : (0x400 | (exp8 & 0x7f));
    } else {
        /* msb clear: nonzero exponents get the replicated-msb filler,
           a zero exponent stays zero. */
        exp = exp8 ? (0x380 | exp8) : 0;
    }
    return sign | ((uint64_t)exp << 52) | frac;
}
/* Convert a softfloat float32 value to the S-float register image. */
797 static inline uint64_t float32_to_s(float32 fa)
799 CPU_FloatU a;
800 a.f = fa;
801 return float32_to_s_int(a.l);
/* Compress an S-float register image back to a raw IEEE single:
   sign + exponent msb come from bits 62-63, the rest from bits 29-58. */
static inline uint32_t s_to_float32_int(uint64_t a)
{
    uint32_t sign_expmsb = (uint32_t)(a >> 32) & 0xc0000000;
    uint32_t rest = (uint32_t)(a >> 29) & 0x3fffffff;
    return sign_expmsb | rest;
}
/* Convert an S-float register image to a softfloat float32 value. */
809 static inline float32 s_to_float32(uint64_t a)
811 CPU_FloatU r;
812 r.l = s_to_float32_int(a);
813 return r.f;
/* STS: register image -> 32-bit memory format. */
816 uint32_t helper_s_to_memory (uint64_t a)
818 return s_to_float32_int(a);
/* LDS: 32-bit memory format -> register image. */
821 uint64_t helper_memory_to_s (uint32_t a)
823 return float32_to_s_int(a);
/* ADDS: IEEE single add on S-float register images. */
826 uint64_t helper_adds (uint64_t a, uint64_t b)
828 float32 fa, fb, fr;
830 fa = s_to_float32(a);
831 fb = s_to_float32(b);
832 fr = float32_add(fa, fb, &FP_STATUS);
833 return float32_to_s(fr);
/* SUBS: IEEE single subtract. */
836 uint64_t helper_subs (uint64_t a, uint64_t b)
838 float32 fa, fb, fr;
840 fa = s_to_float32(a);
841 fb = s_to_float32(b);
842 fr = float32_sub(fa, fb, &FP_STATUS);
843 return float32_to_s(fr);
/* MULS: IEEE single multiply. */
846 uint64_t helper_muls (uint64_t a, uint64_t b)
848 float32 fa, fb, fr;
850 fa = s_to_float32(a);
851 fb = s_to_float32(b);
852 fr = float32_mul(fa, fb, &FP_STATUS);
853 return float32_to_s(fr);
/* DIVS: IEEE single divide. */
856 uint64_t helper_divs (uint64_t a, uint64_t b)
858 float32 fa, fb, fr;
860 fa = s_to_float32(a);
861 fb = s_to_float32(b);
862 fr = float32_div(fa, fb, &FP_STATUS);
863 return float32_to_s(fr);
/* SQRTS: IEEE single square root. */
866 uint64_t helper_sqrts (uint64_t a)
868 float32 fa, fr;
870 fa = s_to_float32(a);
871 fr = float32_sqrt(fa, &FP_STATUS);
872 return float32_to_s(fr);
876 /* T floating (double) */
/* T floats use the IEEE double bit layout directly; these are pure
   reinterpretations through the CPU_DoubleU union. */
877 static inline float64 t_to_float64(uint64_t a)
879 /* Memory format is the same as float64 */
880 CPU_DoubleU r;
881 r.ll = a;
882 return r.d;
885 static inline uint64_t float64_to_t(float64 fa)
887 /* Memory format is the same as float64 */
888 CPU_DoubleU r;
889 r.d = fa;
890 return r.ll;
/* ADDT: IEEE double add. */
893 uint64_t helper_addt (uint64_t a, uint64_t b)
895 float64 fa, fb, fr;
897 fa = t_to_float64(a);
898 fb = t_to_float64(b);
899 fr = float64_add(fa, fb, &FP_STATUS);
900 return float64_to_t(fr);
/* SUBT: IEEE double subtract. */
903 uint64_t helper_subt (uint64_t a, uint64_t b)
905 float64 fa, fb, fr;
907 fa = t_to_float64(a);
908 fb = t_to_float64(b);
909 fr = float64_sub(fa, fb, &FP_STATUS);
910 return float64_to_t(fr);
/* MULT: IEEE double multiply. */
913 uint64_t helper_mult (uint64_t a, uint64_t b)
915 float64 fa, fb, fr;
917 fa = t_to_float64(a);
918 fb = t_to_float64(b);
919 fr = float64_mul(fa, fb, &FP_STATUS);
920 return float64_to_t(fr);
/* DIVT: IEEE double divide. */
923 uint64_t helper_divt (uint64_t a, uint64_t b)
925 float64 fa, fb, fr;
927 fa = t_to_float64(a);
928 fb = t_to_float64(b);
929 fr = float64_div(fa, fb, &FP_STATUS);
930 return float64_to_t(fr);
/* SQRTT: IEEE double square root. */
933 uint64_t helper_sqrtt (uint64_t a)
935 float64 fa, fr;
937 fa = t_to_float64(a);
938 fr = float64_sqrt(fa, &FP_STATUS);
939 return float64_to_t(fr);
942 /* Comparisons */
/* Alpha FP compares return 2.0 (0x4000000000000000, "true") or 0.0. */
/* CMPTUN: true when A and B are unordered (at least one is a NaN). */
943 uint64_t helper_cmptun (uint64_t a, uint64_t b)
945 float64 fa, fb;
947 fa = t_to_float64(a);
948 fb = t_to_float64(b);
950 if (float64_unordered_quiet(fa, fb, &FP_STATUS)) {
951 return 0x4000000000000000ULL;
952 } else {
953 return 0;
/* CMPTEQ: IEEE double equality. */
957 uint64_t helper_cmpteq(uint64_t a, uint64_t b)
959 float64 fa, fb;
961 fa = t_to_float64(a);
962 fb = t_to_float64(b);
964 if (float64_eq_quiet(fa, fb, &FP_STATUS))
965 return 0x4000000000000000ULL;
966 else
967 return 0;
/* CMPTLE: IEEE double less-or-equal (signaling on NaN). */
970 uint64_t helper_cmptle(uint64_t a, uint64_t b)
972 float64 fa, fb;
974 fa = t_to_float64(a);
975 fb = t_to_float64(b);
977 if (float64_le(fa, fb, &FP_STATUS))
978 return 0x4000000000000000ULL;
979 else
980 return 0;
/* CMPTLT: IEEE double less-than (signaling on NaN). */
983 uint64_t helper_cmptlt(uint64_t a, uint64_t b)
985 float64 fa, fb;
987 fa = t_to_float64(a);
988 fb = t_to_float64(b);
990 if (float64_lt(fa, fb, &FP_STATUS))
991 return 0x4000000000000000ULL;
992 else
993 return 0;
/* CMPGEQ: VAX G equality, via g_to_float64. */
996 uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
998 float64 fa, fb;
1000 fa = g_to_float64(a);
1001 fb = g_to_float64(b);
1003 if (float64_eq_quiet(fa, fb, &FP_STATUS))
1004 return 0x4000000000000000ULL;
1005 else
1006 return 0;
/* CMPGLE: VAX G less-or-equal. */
1009 uint64_t helper_cmpgle(uint64_t a, uint64_t b)
1011 float64 fa, fb;
1013 fa = g_to_float64(a);
1014 fb = g_to_float64(b);
1016 if (float64_le(fa, fb, &FP_STATUS))
1017 return 0x4000000000000000ULL;
1018 else
1019 return 0;
/* CMPGLT: VAX G less-than. */
1022 uint64_t helper_cmpglt(uint64_t a, uint64_t b)
1024 float64 fa, fb;
1026 fa = g_to_float64(a);
1027 fb = g_to_float64(b);
1029 if (float64_lt(fa, fb, &FP_STATUS))
1030 return 0x4000000000000000ULL;
1031 else
1032 return 0;
1035 /* Floating point format conversion */
/* CVTTS: IEEE double -> IEEE single. */
1036 uint64_t helper_cvtts (uint64_t a)
1038 float64 fa;
1039 float32 fr;
1041 fa = t_to_float64(a);
1042 fr = float64_to_float32(fa, &FP_STATUS);
1043 return float32_to_s(fr);
/* CVTST: IEEE single -> IEEE double. */
1046 uint64_t helper_cvtst (uint64_t a)
1048 float32 fa;
1049 float64 fr;
1051 fa = s_to_float32(a);
1052 fr = float32_to_float64(fa, &FP_STATUS);
1053 return float64_to_t(fr);
/* CVTQS: signed quadword -> IEEE single. */
1056 uint64_t helper_cvtqs (uint64_t a)
1058 float32 fr = int64_to_float32(a, &FP_STATUS);
1059 return float32_to_s(fr);
1062 /* Implement float64 to uint64 conversion without saturation -- we must
1063 supply the truncated result. This behaviour is used by the compiler
1064 to get unsigned conversion for free with the same instruction.
1066 The VI flag is set when overflow or inexact exceptions should be raised. */
/* A is the raw IEEE double image; ROUNDMODE is a softfloat
   float_round_* constant.  Returns the (possibly wrapped) integer. */
1068 static inline uint64_t helper_cvttq_internal(uint64_t a, int roundmode, int VI)
1070 uint64_t frac, ret = 0;
1071 uint32_t exp, sign, exc = 0;
1072 int shift;
/* Decompose the double into sign, biased exponent, and fraction. */
1074 sign = (a >> 63);
1075 exp = (uint32_t)(a >> 52) & 0x7ff;
1076 frac = a & 0xfffffffffffffull;
1078 if (exp == 0) {
1079 if (unlikely(frac != 0)) {
/* Denormal: magnitude < 1, handled as an all-sticky round. */
1080 goto do_underflow;
1082 } else if (exp == 0x7ff) {
/* NaN -> invalid; infinity -> overflow (only reported when VI). */
1083 exc = (frac ? float_flag_invalid : VI ? float_flag_overflow : 0);
1084 } else {
1085 /* Restore implicit bit. */
1086 frac |= 0x10000000000000ull;
/* Position of the binary point relative to the 52-bit fraction. */
1088 shift = exp - 1023 - 52;
1089 if (shift >= 0) {
1090 /* In this case the number is so large that we must shift
1091 the fraction left. There is no rounding to do. */
1092 if (shift < 63) {
1093 ret = frac << shift;
/* Shifting back recovers frac only if no bits were lost. */
1094 if (VI && (ret >> shift) != frac) {
1095 exc = float_flag_overflow;
1098 } else {
1099 uint64_t round;
1101 /* In this case the number is smaller than the fraction as
1102 represented by the 52 bit number. Here we must think
1103 about rounding the result. Handle this by shifting the
1104 fractional part of the number into the high bits of ROUND.
1105 This will let us efficiently handle round-to-nearest. */
1106 shift = -shift;
1107 if (shift < 63) {
1108 ret = frac >> shift;
1109 round = frac << (64 - shift);
1110 } else {
1111 /* The exponent is so small we shift out everything.
1112 Leave a sticky bit for proper rounding below. */
1113 do_underflow:
1114 round = 1;
1117 if (round) {
1118 exc = (VI ? float_flag_inexact : 0);
1119 switch (roundmode) {
1120 case float_round_nearest_even:
1121 if (round == (1ull << 63)) {
1122 /* Fraction is exactly 0.5; round to even. */
1123 ret += (ret & 1);
1124 } else if (round > (1ull << 63)) {
1125 ret += 1;
1127 break;
1128 case float_round_to_zero:
1129 break;
1130 case float_round_up:
/* Round toward +inf: bump magnitude only for positive values. */
1131 ret += 1 - sign;
1132 break;
1133 case float_round_down:
/* Round toward -inf: bump magnitude only for negative values. */
1134 ret += sign;
1135 break;
/* Apply the sign by two's-complement negation (wraps, no trap). */
1139 if (sign) {
1140 ret = -ret;
1143 if (unlikely(exc)) {
1144 float_raise(exc, &FP_STATUS);
1147 return ret;
/* CVTTQ: double -> quadword using the current FPCR rounding mode,
   with overflow/inexact reporting enabled. */
1150 uint64_t helper_cvttq(uint64_t a)
1152 return helper_cvttq_internal(a, FP_STATUS.float_rounding_mode, 1);
/* CVTTQ/C: chopped (round-to-zero), no overflow/inexact reporting. */
1155 uint64_t helper_cvttq_c(uint64_t a)
1157 return helper_cvttq_internal(a, float_round_to_zero, 0);
/* CVTTQ/SVIC: chopped, with overflow/inexact reporting. */
1160 uint64_t helper_cvttq_svic(uint64_t a)
1162 return helper_cvttq_internal(a, float_round_to_zero, 1);
/* CVTQT: signed quadword -> IEEE double. */
1165 uint64_t helper_cvtqt (uint64_t a)
1167 float64 fr = int64_to_float64(a, &FP_STATUS);
1168 return float64_to_t(fr);
/* CVTQF: signed quadword -> VAX F float. */
1171 uint64_t helper_cvtqf (uint64_t a)
1173 float32 fr = int64_to_float32(a, &FP_STATUS);
1174 return float32_to_f(fr);
/* CVTGF: VAX G -> VAX F, via IEEE double/single. */
1177 uint64_t helper_cvtgf (uint64_t a)
1179 float64 fa;
1180 float32 fr;
1182 fa = g_to_float64(a);
1183 fr = float64_to_float32(fa, &FP_STATUS);
1184 return float32_to_f(fr);
/* CVTGQ: VAX G -> quadword, round-to-zero. */
1187 uint64_t helper_cvtgq (uint64_t a)
1189 float64 fa = g_to_float64(a);
1190 return float64_to_int64_round_to_zero(fa, &FP_STATUS);
/* CVTQG: signed quadword -> VAX G. */
1193 uint64_t helper_cvtqg (uint64_t a)
1195 float64 fr;
1196 fr = int64_to_float64(a, &FP_STATUS);
1197 return float64_to_g(fr);
1200 /* PALcode support special instructions */
1201 #if !defined (CONFIG_USER_ONLY)
/* HW_RET: return from PALcode.  Bit 0 of A selects whether to stay in
   PAL mode; leaving PAL mode swaps back the shadow registers. */
1202 void helper_hw_ret (uint64_t a)
1204 env->pc = a & ~3;
1205 env->intr_flag = 0;
/* Any branch invalidates an outstanding load-locked reservation. */
1206 env->lock_addr = -1;
1207 if ((a & 1) == 0) {
1208 env->pal_mode = 0;
1209 swap_shadow_regs(env);
/* TBIA: invalidate all TLB entries. */
1213 void helper_tbia(void)
1215 tlb_flush(env, 1);
/* TBIS: invalidate the TLB entry for virtual address P. */
1218 void helper_tbis(uint64_t p)
1220 tlb_flush_page(env, p);
/* Halt the machine: RESTART nonzero requests a reset, zero a shutdown. */
1223 void helper_halt(uint64_t restart)
1225 if (restart) {
1226 qemu_system_reset_request();
1227 } else {
1228 qemu_system_shutdown_request();
/* Read the wall-clock time in nanoseconds for the PALcode RTC. */
1232 uint64_t helper_get_time(void)
1234 return qemu_get_clock_ns(rtc_clock);
/* Arm (EXPIRE != 0) or cancel (EXPIRE == 0) the per-CPU alarm timer. */
1237 void helper_set_alarm(uint64_t expire)
1239 if (expire) {
1240 env->alarm_expire = expire;
1241 qemu_mod_timer(env->alarm_timer, expire);
1242 } else {
1243 qemu_del_timer(env->alarm_timer);
1246 #endif
1248 /*****************************************************************************/
1249 /* Softmmu support */
1250 #if !defined (CONFIG_USER_ONLY)
/* Longword physical load, sign-extended to 64 bits. */
1251 uint64_t helper_ldl_phys(uint64_t p)
1253 return (int32_t)ldl_phys(p);
/* Quadword physical load. */
1256 uint64_t helper_ldq_phys(uint64_t p)
1258 return ldq_phys(p);
/* LDL_L: longword load-locked -- record the reservation address and
   the loaded (sign-extended) value for a later store-conditional. */
1261 uint64_t helper_ldl_l_phys(uint64_t p)
1263 env->lock_addr = p;
1264 return env->lock_value = (int32_t)ldl_phys(p);
1267 uint64_t helper_ldq_l_phys(uint64_t p)
1269 env->lock_addr = p;
1270 return env->lock_value = ldl_phys(p);
/* Longword physical store. */
1273 void helper_stl_phys(uint64_t p, uint64_t v)
1275 stl_phys(p, v);
/* Quadword physical store. */
1278 void helper_stq_phys(uint64_t p, uint64_t v)
1280 stq_phys(p, v);
/* STL_C: longword store-conditional.  Succeeds (returns 1) only if the
   reservation from a prior load-locked is still valid and the memory
   still holds the locked value; always clears the reservation. */
1283 uint64_t helper_stl_c_phys(uint64_t p, uint64_t v)
1285 uint64_t ret = 0;
1287 if (p == env->lock_addr) {
1288 int32_t old = ldl_phys(p);
1289 if (old == (int32_t)env->lock_value) {
1290 stl_phys(p, v);
1291 ret = 1;
1294 env->lock_addr = -1;
1296 return ret;
/* STQ_C: quadword store-conditional; same protocol as STL_C. */
1299 uint64_t helper_stq_c_phys(uint64_t p, uint64_t v)
1301 uint64_t ret = 0;
1303 if (p == env->lock_addr) {
1304 uint64_t old = ldq_phys(p);
1305 if (old == env->lock_value) {
1306 stq_phys(p, v);
1307 ret = 1;
1310 env->lock_addr = -1;
1312 return ret;
/* Deliver an unaligned-access trap: restore guest state from RETADDR,
   then report the faulting address plus the opcode and destination
   register extracted from the faulting instruction. */
1315 static void QEMU_NORETURN do_unaligned_access(target_ulong addr, int is_write,
1316 int is_user, void *retaddr)
1318 uint64_t pc;
1319 uint32_t insn;
1321 do_restore_state(retaddr);
/* env->pc now points at the faulting insn; re-fetch it to decode. */
1323 pc = env->pc;
1324 insn = ldl_code(pc);
1326 env->trap_arg0 = addr;
1327 env->trap_arg1 = insn >> 26; /* opcode */
1328 env->trap_arg2 = (insn >> 21) & 31; /* dest regno */
1329 helper_excp(EXCP_UNALIGN, 0);
/* Access to unassigned physical memory: raise a machine check with the
   faulting address and access direction. */
1332 void QEMU_NORETURN cpu_unassigned_access(CPUState *env1,
1333 target_phys_addr_t addr, int is_write,
1334 int is_exec, int unused, int size)
1336 env = env1;
1337 env->trap_arg0 = addr;
1338 env->trap_arg1 = is_write;
1339 dynamic_excp(EXCP_MCHK, 0);
1342 #include "softmmu_exec.h"
1344 #define MMUSUFFIX _mmu
1345 #define ALIGNED_ONLY
1347 #define SHIFT 0
1348 #include "softmmu_template.h"
1350 #define SHIFT 1
1351 #include "softmmu_template.h"
1353 #define SHIFT 2
1354 #include "softmmu_template.h"
1356 #define SHIFT 3
1357 #include "softmmu_template.h"
1359 /* try to fill the TLB and return an exception if error. If retaddr is
1360 NULL, it means that the function was called in C code (i.e. not
1361 from generated code or from helper.c) */
1362 /* XXX: fix it to restore all registers */
/* Called by the softmmu slow path on a TLB miss; on an unresolvable
   fault the MMU handler has set exception_index/error_code and we exit
   the CPU loop after rolling back guest state. */
1363 void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
1364 void *retaddr)
1366 CPUState *saved_env;
1367 int ret;
/* The global env must track the faulting CPU for the duration. */
1369 saved_env = env;
1370 env = env1;
1371 ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx);
1372 if (unlikely(ret != 0)) {
1373 do_restore_state(retaddr);
1374 /* Exception index and error code are already set */
1375 cpu_loop_exit(env);
1377 env = saved_env;
1379 #endif