/*
 *  Alpha emulation cpu micro-operations helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */

#include "exec.h"
#include "host-utils.h"
#include "softfloat.h"
#include "helper.h"

void helper_tb_flush (void)
{
    tlb_flush(env, 1);
}

/*****************************************************************************/
/* Exceptions processing helpers */
void helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}

uint64_t helper_amask (uint64_t arg)
{
    switch (env->implver) {
    case IMPLVER_2106x:
        /* EV4, EV45, LCA, LCA45 & EV5 */
        break;
    case IMPLVER_21164:
    case IMPLVER_21264:
    case IMPLVER_21364:
        arg &= ~env->amask;
        break;
    }
    return arg;
}

uint64_t helper_load_pcc (void)
{
    /* XXX: TODO */
    return 0;
}

uint64_t helper_load_implver (void)
{
    return env->implver;
}

uint64_t helper_load_fpcr (void)
{
    uint64_t ret = 0;
#ifdef CONFIG_SOFTFLOAT
    ret |= (uint64_t)env->fp_status.float_exception_flags << 52;
    if (env->fp_status.float_exception_flags)
        ret |= 1ULL << 63;
    env->ipr[IPR_EXC_SUM] &= ~0x3E;
    env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
#endif
    switch (env->fp_status.float_rounding_mode) {
    case float_round_nearest_even:
        ret |= 2ULL << 58;
        break;
    case float_round_down:
        ret |= 1ULL << 58;
        break;
    case float_round_up:
        ret |= 3ULL << 58;
        break;
    case float_round_to_zero:
        break;
    }
    return ret;
}

void helper_store_fpcr (uint64_t val)
{
#ifdef CONFIG_SOFTFLOAT
    set_float_exception_flags((val >> 52) & 0x3F, &FP_STATUS);
#endif
    switch ((val >> 58) & 3) {
    case 0:
        set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
        break;
    case 1:
        set_float_rounding_mode(float_round_down, &FP_STATUS);
        break;
    case 2:
        set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
        break;
    case 3:
        set_float_rounding_mode(float_round_up, &FP_STATUS);
        break;
    }
}

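/* The RS and RC instructions read the per-CPU interrupt flag and then
   set respectively clear it; a global spinlock keeps the
   read-modify-write atomic when several emulated CPUs run at once. */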
spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;

uint64_t helper_rs(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 1;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}

uint64_t helper_rc(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 0;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}

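/* Integer arithmetic with overflow traps.  For addition, signed
   overflow happens exactly when both operands have the same sign and
   the result's sign differs: (a ^ b ^ -1) & (a ^ sum) has the sign bit
   set in precisely that case.  For subtraction the operand signs must
   differ instead, hence (a ^ b) & (a ^ diff). */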
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 += op2;
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 -= op2;
    if (unlikely((tmp ^ op2) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 - op2);
    if (unlikely((tmp ^ op2) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return (int64_t)((int32_t)res);
}

uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    muls64(&tl, &th, op1, op2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (unlikely((th + 1) > 1)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return tl;
}

uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}

uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}

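/* byte_zap clears every byte of op whose bit is set in mskb: each mask
   bit is widened to a full 0xFF byte by the multiplications below, so
   e.g. mskb = 0x03 builds mask 0x000000000000FFFF and zeroes the low
   two bytes of op. */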
static always_inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}

uint64_t helper_mskbl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x01 << (mask & 7));
}

uint64_t helper_insbl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x01 << (mask & 7)));
}

uint64_t helper_mskwl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x03 << (mask & 7));
}

uint64_t helper_inswl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x03 << (mask & 7)));
}

uint64_t helper_mskll(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x0F << (mask & 7));
}

uint64_t helper_insll(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x0F << (mask & 7)));
}

uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}

uint64_t helper_mskql(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0xFF << (mask & 7));
}

uint64_t helper_insql(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0xFF << (mask & 7)));
}

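/* The *wh/*lh/*qh variants operate on the bytes of an unaligned datum
   that spill over into the next quadword: shifting the byte mask right
   by 8 keeps only the carry-out bytes, and the insert helpers shift
   the value right correspondingly. */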
uint64_t helper_mskwh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x03 << (mask & 7)) >> 8);
}

uint64_t helper_inswh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0x03 << (mask & 7)) >> 8));
}

uint64_t helper_msklh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x0F << (mask & 7)) >> 8);
}

uint64_t helper_inslh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0x0F << (mask & 7)) >> 8));
}

uint64_t helper_mskqh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0xFF << (mask & 7)) >> 8);
}

uint64_t helper_insqh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0xFF << (mask & 7)) >> 8));
}

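/* CMPBGE compares the eight byte pairs of its operands in parallel and
   sets one result bit per byte where op1 is unsigned greater-or-equal;
   it is the workhorse of the Alpha string routines. */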
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    return res;
}

/* Floating point helpers */

/* F floating (VAX) */
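/* VAX F floating has no NaNs, infinities or IEEE-style denormal
   results, so unrepresentable inputs are mapped to the reserved
   "dirty zero" pattern (1); the register format widens the 8-bit
   exponent into the 11-bit field used by G/T floating. */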
static always_inline uint64_t float32_to_f (float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static always_inline float32 f_to_float32 (uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}

uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;
    r = (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;
    r = ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000))
        r |= 0x7ll << 59;
    return r;
}

uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}

/* G floating (VAX) */
static always_inline uint64_t float64_to_g (float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static always_inline float64 g_to_float64 (uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}

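/* The G floating memory and register formats differ only by a swap of
   the four 16-bit words; the permutation is its own inverse, so both
   conversion directions use the same shuffle. */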
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;
    r = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;
    r = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}

/* S floating (single) */
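/* In memory, S floating is plain IEEE single precision; in a register
   it is widened into the T floating layout, with the fraction
   left-justified and the 0x7ll << 59 term approximating the hardware's
   exponent mapping for values whose exponent MSB is clear. */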
static always_inline uint64_t float32_to_s (float32 fa)
{
    CPU_FloatU a;
    uint64_t r;

    a.f = fa;

    r = (((uint64_t)(a.l & 0xc0000000)) << 32) | (((uint64_t)(a.l & 0x3fffffff)) << 29);
    if (((a.l & 0x7f800000) != 0x7f800000) && (!(a.l & 0x40000000)))
        r |= 0x7ll << 59;
    return r;
}

static always_inline float32 s_to_float32 (uint64_t a)
{
    CPU_FloatU r;
    r.l = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
    return r.f;
}

uint32_t helper_s_to_memory (uint64_t a)
{
    /* Memory format is the same as float32 */
    float32 fa = s_to_float32(a);
    return *(uint32_t*)(&fa);
}

uint64_t helper_memory_to_s (uint32_t a)
{
    /* Memory format is the same as float32 */
    return float32_to_s(*(float32*)(&a));
}

uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}

/* T floating (double) */
static always_inline float64 t_to_float64 (uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

static always_inline uint64_t float64_to_t (float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}

uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}

/* Sign copy */
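/* CPYS copies the sign of a onto b, CPYSN copies the complemented
   sign, and CPYSE copies the whole sign-and-exponent field. */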
uint64_t helper_cpys(uint64_t a, uint64_t b)
{
    return (a & 0x8000000000000000ULL) | (b & ~0x8000000000000000ULL);
}

uint64_t helper_cpysn(uint64_t a, uint64_t b)
{
    return ((~a) & 0x8000000000000000ULL) | (b & ~0x8000000000000000ULL);
}

uint64_t helper_cpyse(uint64_t a, uint64_t b)
{
    return (a & 0xFFF0000000000000ULL) | (b & ~0xFFF0000000000000ULL);
}

/* Comparisons */
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_is_nan(fa) || float64_is_nan(fb))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

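/* The cmpf* helpers test a register's raw bit pattern against zero:
   bit 63 is the sign and any set bit in 62:0 means a nonzero
   magnitude, so both +0 and -0 compare equal to zero. */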
uint64_t helper_cmpfeq (uint64_t a)
{
    return !(a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfne (uint64_t a)
{
    return (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpflt (uint64_t a)
{
    return (a & 0x8000000000000000ULL) && (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfle (uint64_t a)
{
    return (a & 0x8000000000000000ULL) || !(a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfgt (uint64_t a)
{
    return !(a & 0x8000000000000000ULL) && (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfge (uint64_t a)
{
    return !(a & 0x8000000000000000ULL) || !(a & 0x7FFFFFFFFFFFFFFFULL);
}

/* Floating point format conversion */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvttq (uint64_t a)
{
    float64 fa = t_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;

    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}

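/* A longword held in an FP register keeps its top two bits in 63:62
   and its low bits shifted up by 29; cvtlq unpacks that layout and the
   cvtql variants rebuild it, optionally trapping on overflow. */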
uint64_t helper_cvtlq (uint64_t a)
{
    return (int64_t)((int32_t)(((a >> 32) & 0xC0000000) | ((a >> 29) & 0x3FFFFFFF)));
}

static always_inline uint64_t __helper_cvtql (uint64_t a, int s, int v)
{
    uint64_t r;

    r = ((uint64_t)(a & 0xC0000000)) << 32;
    r |= ((uint64_t)(a & 0x7FFFFFFF)) << 29;

    if (v && (int64_t)((int32_t)r) != (int64_t)r) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO */
    }
    return r;
}

uint64_t helper_cvtql (uint64_t a)
{
    return __helper_cvtql(a, 0, 0);
}

uint64_t helper_cvtqlv (uint64_t a)
{
    return __helper_cvtql(a, 0, 1);
}

uint64_t helper_cvtqlsv (uint64_t a)
{
    return __helper_cvtql(a, 1, 1);
}

/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)

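/* HW_REI and HW_RET return from PALcode: the new PC comes from
   EXC_ADDR (or the operand), with bit 0 recording whether execution
   should resume in PALmode. */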
void helper_hw_rei (void)
{
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->ipr[IPR_EXC_ADDR] = a & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

uint64_t helper_mfpr (int iprn, uint64_t val)
{
    uint64_t tmp;

    if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
        val = tmp;

    return val;
}

void helper_mtpr (int iprn, uint64_t val)
{
    cpu_alpha_mtpr(env, iprn, val, NULL);
}

void helper_set_alt_mode (void)
{
    env->saved_mode = env->ps & 0xC;
    env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
}

void helper_restore_mode (void)
{
    env->ps = (env->ps & ~0xC) | env->saved_mode;
}

#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

/* XXX: the two following helpers are pure hacks.
 *      Hopefully, once we emulate the PALcode we should never see
 *      HW_LD / HW_ST instructions.
 */
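/* Probe the softmmu TLB by hand: hash the virtual page number into the
   per-MMU-mode table, compare the tag, and on a miss let tlb_fill
   refill the entry before retrying the lookup. */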
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 0, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 1, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

void helper_ldl_raw(uint64_t t0, uint64_t t1)
{
    ldl_raw(t1, t0);
}

void helper_ldq_raw(uint64_t t0, uint64_t t1)
{
    ldq_raw(t1, t0);
}

void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldl_raw(t1, t0);
}

void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldq_raw(t1, t0);
}

void helper_ldl_kernel(uint64_t t0, uint64_t t1)
{
    ldl_kernel(t1, t0);
}

void helper_ldq_kernel(uint64_t t0, uint64_t t1)
{
    ldq_kernel(t1, t0);
}

void helper_ldl_data(uint64_t t0, uint64_t t1)
{
    ldl_data(t1, t0);
}

void helper_ldq_data(uint64_t t0, uint64_t t1)
{
    ldq_data(t1, t0);
}

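/* Minimal LDx_L/STx_C emulation: env->lock remembers the locked
   address; a store-conditional succeeds only while that address still
   matches, and the reservation is then reset to the invalid address 1. */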
void helper_stl_raw(uint64_t t0, uint64_t t1)
{
    stl_raw(t1, t0);
}

void helper_stq_raw(uint64_t t0, uint64_t t1)
{
    stq_raw(t1, t0);
}

uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stl_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stq_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

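/* Instantiate the softmmu load/store accessors: each inclusion of
   softmmu_template.h generates them for one access size, with SHIFT
   the log2 of the size in bytes (0 = byte ... 3 = quadword). */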
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif