target-ppc: convert fp ops to TCG
[qemu/qemu-JZ.git] / target-ppc / op_helper.c
blob daaceecf07ad964b780a9cde5d8c2a038fe450b4
1 /*
2 * PowerPC emulation helpers for qemu.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "exec.h"
21 #include "host-utils.h"
22 #include "helper.h"
24 #include "helper_regs.h"
25 #include "op_helper.h"
27 #define MEMSUFFIX _raw
28 #include "op_helper.h"
29 #include "op_helper_mem.h"
30 #if !defined(CONFIG_USER_ONLY)
31 #define MEMSUFFIX _user
32 #include "op_helper.h"
33 #include "op_helper_mem.h"
34 #define MEMSUFFIX _kernel
35 #include "op_helper.h"
36 #include "op_helper_mem.h"
37 #define MEMSUFFIX _hypv
38 #include "op_helper.h"
39 #include "op_helper_mem.h"
40 #endif
42 //#define DEBUG_OP
43 //#define DEBUG_EXCEPTIONS
44 //#define DEBUG_SOFTWARE_TLB
46 /*****************************************************************************/
47 /* Exceptions processing helpers */
49 void do_raise_exception_err (uint32_t exception, int error_code)
51 #if 0
52 printf("Raise exception %3x code : %d\n", exception, error_code);
53 #endif
54 env->exception_index = exception;
55 env->error_code = error_code;
56 cpu_loop_exit();
59 void do_raise_exception (uint32_t exception)
61 do_raise_exception_err(exception, 0);
64 /*****************************************************************************/
65 /* Registers load and stores */
66 target_ulong helper_load_cr (void)
68 return (env->crf[0] << 28) |
69 (env->crf[1] << 24) |
70 (env->crf[2] << 20) |
71 (env->crf[3] << 16) |
72 (env->crf[4] << 12) |
73 (env->crf[5] << 8) |
74 (env->crf[6] << 4) |
75 (env->crf[7] << 0);
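/* Note: in helper_store_cr below, bit (7 - i) of 'mask' enables the update of
 * CR field i, so the most-significant mask bit corresponds to CR0, matching
 * the FXM field of the mtcrf instruction. */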
78 void helper_store_cr (target_ulong val, uint32_t mask)
80 int i, sh;
82 for (i = 0, sh = 7; i < 8; i++, sh--) {
83 if (mask & (1 << sh))
84 env->crf[i] = (val >> (sh * 4)) & 0xFUL;
88 #if defined(TARGET_PPC64)
89 void do_store_pri (int prio)
91 env->spr[SPR_PPR] &= ~0x001C000000000000ULL;
92 env->spr[SPR_PPR] |= ((uint64_t)prio & 0x7) << 50;
94 #endif
96 target_ulong ppc_load_dump_spr (int sprn)
98 if (loglevel != 0) {
99 fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
100 sprn, sprn, env->spr[sprn]);
103 return env->spr[sprn];
106 void ppc_store_dump_spr (int sprn, target_ulong val)
108 if (loglevel != 0) {
109 fprintf(logfile, "Write SPR %d %03x => " ADDRX " <= " ADDRX "\n",
110 sprn, sprn, env->spr[sprn], val);
112 env->spr[sprn] = val;
115 /*****************************************************************************/
116 /* Fixed point operations helpers */
117 #if defined(TARGET_PPC64)
119 /* multiply high word */
120 uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
122 uint64_t tl, th;
124 muls64(&tl, &th, arg1, arg2);
125 return th;
128 /* multiply high word unsigned */
129 uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
131 uint64_t tl, th;
133 mulu64(&tl, &th, arg1, arg2);
134 return th;
137 uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
139 int64_t th;
140 uint64_t tl;
142 muls64(&tl, (uint64_t *)&th, arg1, arg2);
143 /* If th != 0 && th != -1, then we had an overflow */
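 /* (uint64_t)(th + 1) <= 1 holds exactly when th is 0 or -1, i.e. when the
  * high word is a plain sign extension of the low word and the product
  * fits in 64 bits. */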
144 if (likely((uint64_t)(th + 1) <= 1)) {
145 env->xer &= ~(1 << XER_OV);
146 } else {
147 env->xer |= (1 << XER_OV) | (1 << XER_SO);
149 return (int64_t)tl;
151 #endif
153 target_ulong helper_cntlzw (target_ulong t)
155 return clz32(t);
158 #if defined(TARGET_PPC64)
159 target_ulong helper_cntlzd (target_ulong t)
161 return clz64(t);
163 #endif
165 /* shift right arithmetic helper */
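/* For sraw/srad below, XER[CA] is set when the source is negative and any
 * 1 bits are shifted out, and cleared otherwise; shift amounts with the
 * high bit set (>= 32 or >= 64) reduce to a pure sign fill. */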
166 target_ulong helper_sraw (target_ulong value, target_ulong shift)
168 int32_t ret;
170 if (likely(!(shift & 0x20))) {
171 if (likely((uint32_t)shift != 0)) {
172 shift &= 0x1f;
173 ret = (int32_t)value >> shift;
174 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
175 env->xer &= ~(1 << XER_CA);
176 } else {
177 env->xer |= (1 << XER_CA);
179 } else {
180 ret = (int32_t)value;
181 env->xer &= ~(1 << XER_CA);
183 } else {
184 ret = (int32_t)value >> 31;
185 if (ret) {
186 env->xer |= (1 << XER_CA);
187 } else {
188 env->xer &= ~(1 << XER_CA);
191 return (target_long)ret;
194 #if defined(TARGET_PPC64)
195 target_ulong helper_srad (target_ulong value, target_ulong shift)
197 int64_t ret;
199 if (likely(!(shift & 0x40))) {
200 if (likely((uint64_t)shift != 0)) {
201 shift &= 0x3f;
202 ret = (int64_t)value >> shift;
 203             if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
204 env->xer &= ~(1 << XER_CA);
205 } else {
206 env->xer |= (1 << XER_CA);
208 } else {
209 ret = (int64_t)value;
210 env->xer &= ~(1 << XER_CA);
212 } else {
213 ret = (int64_t)value >> 63;
214 if (ret) {
215 env->xer |= (1 << XER_CA);
216 } else {
217 env->xer &= ~(1 << XER_CA);
220 return ret;
222 #endif
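/* popcntb computes a per-byte population count: each byte of the result
 * holds the number of 1 bits in the corresponding byte of the source.
 * The three shift-and-mask steps below never carry across byte boundaries,
 * so no final fold is needed. */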
224 target_ulong helper_popcntb (target_ulong val)
226 val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
227 val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
228 val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
229 return val;
232 #if defined(TARGET_PPC64)
233 target_ulong helper_popcntb_64 (target_ulong val)
235 val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
236 val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
237 val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
238 return val;
240 #endif
242 /*****************************************************************************/
243 /* Floating point operations helpers */
244 static always_inline int fpisneg (float64 d)
246 CPU_DoubleU u;
248 u.d = d;
250 return u.ll >> 63 != 0;
253 static always_inline int isden (float64 d)
255 CPU_DoubleU u;
257 u.d = d;
259 return ((u.ll >> 52) & 0x7FF) == 0;
262 static always_inline int iszero (float64 d)
264 CPU_DoubleU u;
266 u.d = d;
268 return (u.ll & ~0x8000000000000000ULL) == 0;
271 static always_inline int isinfinity (float64 d)
273 CPU_DoubleU u;
275 u.d = d;
277 return ((u.ll >> 52) & 0x7FF) == 0x7FF &&
278 (u.ll & 0x000FFFFFFFFFFFFFULL) == 0;
281 #ifdef CONFIG_SOFTFLOAT
282 static always_inline int isfinite (float64 d)
284 CPU_DoubleU u;
286 u.d = d;
288 return (((u.ll >> 52) & 0x7FF) != 0x7FF);
291 static always_inline int isnormal (float64 d)
293 CPU_DoubleU u;
295 u.d = d;
297 uint32_t exp = (u.ll >> 52) & 0x7FF;
298 return ((0 < exp) && (exp < 0x7FF));
300 #endif
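/* helper_compute_fprf returns the 5-bit FPRF class code (C || FPCC) defined
 * by the PowerPC ISA: 0x11 quiet NaN, 0x09/0x05 -/+ infinity, 0x08/0x04
 * -/+ normalized, 0x18/0x14 -/+ denormalized, 0x12/0x02 -/+ zero; for a
 * signaling NaN the flags are architecturally undefined and 0 is used.
 * Only the low 4 bits (FPCC) are returned to the caller for Rc=1 updates. */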
302 uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
304 CPU_DoubleU farg;
305 int isneg;
306 int ret;
307 farg.ll = arg;
308 isneg = fpisneg(farg.d);
309 if (unlikely(float64_is_nan(farg.d))) {
310 if (float64_is_signaling_nan(farg.d)) {
311 /* Signaling NaN: flags are undefined */
312 ret = 0x00;
313 } else {
314 /* Quiet NaN */
315 ret = 0x11;
317 } else if (unlikely(isinfinity(farg.d))) {
318 /* +/- infinity */
319 if (isneg)
320 ret = 0x09;
321 else
322 ret = 0x05;
323 } else {
324 if (iszero(farg.d)) {
325 /* +/- zero */
326 if (isneg)
327 ret = 0x12;
328 else
329 ret = 0x02;
330 } else {
331 if (isden(farg.d)) {
332 /* Denormalized numbers */
333 ret = 0x10;
334 } else {
335 /* Normalized numbers */
336 ret = 0x00;
338 if (isneg) {
339 ret |= 0x08;
340 } else {
341 ret |= 0x04;
345 if (set_fprf) {
346 /* We update FPSCR_FPRF */
347 env->fpscr &= ~(0x1F << FPSCR_FPRF);
348 env->fpscr |= ret << FPSCR_FPRF;
350 /* We just need fpcc to update Rc1 */
351 return ret & 0xF;
354 /* Floating-point invalid operations exception */
355 static always_inline uint64_t fload_invalid_op_excp (int op)
357 uint64_t ret = 0;
358 int ve;
360 ve = fpscr_ve;
361 if (op & POWERPC_EXCP_FP_VXSNAN) {
362 /* Operation on signaling NaN */
363 env->fpscr |= 1 << FPSCR_VXSNAN;
365 if (op & POWERPC_EXCP_FP_VXSOFT) {
366 /* Software-defined condition */
367 env->fpscr |= 1 << FPSCR_VXSOFT;
369 switch (op & ~(POWERPC_EXCP_FP_VXSOFT | POWERPC_EXCP_FP_VXSNAN)) {
370 case POWERPC_EXCP_FP_VXISI:
371 /* Magnitude subtraction of infinities */
372 env->fpscr |= 1 << FPSCR_VXISI;
373 goto update_arith;
374 case POWERPC_EXCP_FP_VXIDI:
375 /* Division of infinity by infinity */
376 env->fpscr |= 1 << FPSCR_VXIDI;
377 goto update_arith;
378 case POWERPC_EXCP_FP_VXZDZ:
379 /* Division of zero by zero */
380 env->fpscr |= 1 << FPSCR_VXZDZ;
381 goto update_arith;
382 case POWERPC_EXCP_FP_VXIMZ:
383 /* Multiplication of zero by infinity */
384 env->fpscr |= 1 << FPSCR_VXIMZ;
385 goto update_arith;
386 case POWERPC_EXCP_FP_VXVC:
387 /* Ordered comparison of NaN */
388 env->fpscr |= 1 << FPSCR_VXVC;
389 env->fpscr &= ~(0xF << FPSCR_FPCC);
390 env->fpscr |= 0x11 << FPSCR_FPCC;
391 /* We must update the target FPR before raising the exception */
392 if (ve != 0) {
393 env->exception_index = POWERPC_EXCP_PROGRAM;
394 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
395 /* Update the floating-point enabled exception summary */
396 env->fpscr |= 1 << FPSCR_FEX;
 397             /* Exception is deferred */
398 ve = 0;
400 break;
401 case POWERPC_EXCP_FP_VXSQRT:
402 /* Square root of a negative number */
403 env->fpscr |= 1 << FPSCR_VXSQRT;
404 update_arith:
405 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
406 if (ve == 0) {
407 /* Set the result to quiet NaN */
408 ret = UINT64_MAX;
409 env->fpscr &= ~(0xF << FPSCR_FPCC);
410 env->fpscr |= 0x11 << FPSCR_FPCC;
412 break;
413 case POWERPC_EXCP_FP_VXCVI:
414 /* Invalid conversion */
415 env->fpscr |= 1 << FPSCR_VXCVI;
416 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
417 if (ve == 0) {
418 /* Set the result to quiet NaN */
419 ret = UINT64_MAX;
420 env->fpscr &= ~(0xF << FPSCR_FPCC);
421 env->fpscr |= 0x11 << FPSCR_FPCC;
423 break;
425 /* Update the floating-point invalid operation summary */
426 env->fpscr |= 1 << FPSCR_VX;
427 /* Update the floating-point exception summary */
428 env->fpscr |= 1 << FPSCR_FX;
429 if (ve != 0) {
430 /* Update the floating-point enabled exception summary */
431 env->fpscr |= 1 << FPSCR_FEX;
432 if (msr_fe0 != 0 || msr_fe1 != 0)
433 do_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
435 return ret;
438 static always_inline uint64_t float_zero_divide_excp (uint64_t arg1, uint64_t arg2)
440 env->fpscr |= 1 << FPSCR_ZX;
441 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
442 /* Update the floating-point exception summary */
443 env->fpscr |= 1 << FPSCR_FX;
444 if (fpscr_ze != 0) {
445 /* Update the floating-point enabled exception summary */
446 env->fpscr |= 1 << FPSCR_FEX;
447 if (msr_fe0 != 0 || msr_fe1 != 0) {
448 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
449 POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
451 } else {
452 /* Set the result to infinity */
453 arg1 = ((arg1 ^ arg2) & 0x8000000000000000ULL);
454 arg1 |= 0x7FFULL << 52;
456 return arg1;
459 static always_inline void float_overflow_excp (void)
461 env->fpscr |= 1 << FPSCR_OX;
462 /* Update the floating-point exception summary */
463 env->fpscr |= 1 << FPSCR_FX;
464 if (fpscr_oe != 0) {
465 /* XXX: should adjust the result */
466 /* Update the floating-point enabled exception summary */
467 env->fpscr |= 1 << FPSCR_FEX;
468 /* We must update the target FPR before raising the exception */
469 env->exception_index = POWERPC_EXCP_PROGRAM;
470 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
471 } else {
472 env->fpscr |= 1 << FPSCR_XX;
473 env->fpscr |= 1 << FPSCR_FI;
477 static always_inline void float_underflow_excp (void)
479 env->fpscr |= 1 << FPSCR_UX;
480 /* Update the floating-point exception summary */
481 env->fpscr |= 1 << FPSCR_FX;
482 if (fpscr_ue != 0) {
483 /* XXX: should adjust the result */
484 /* Update the floating-point enabled exception summary */
485 env->fpscr |= 1 << FPSCR_FEX;
486 /* We must update the target FPR before raising the exception */
487 env->exception_index = POWERPC_EXCP_PROGRAM;
488 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
492 static always_inline void float_inexact_excp (void)
494 env->fpscr |= 1 << FPSCR_XX;
495 /* Update the floating-point exception summary */
496 env->fpscr |= 1 << FPSCR_FX;
497 if (fpscr_xe != 0) {
498 /* Update the floating-point enabled exception summary */
499 env->fpscr |= 1 << FPSCR_FEX;
500 /* We must update the target FPR before raising the exception */
501 env->exception_index = POWERPC_EXCP_PROGRAM;
502 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
506 static always_inline void fpscr_set_rounding_mode (void)
508 int rnd_type;
510 /* Set rounding mode */
511 switch (fpscr_rn) {
512 case 0:
513 /* Best approximation (round to nearest) */
514 rnd_type = float_round_nearest_even;
515 break;
516 case 1:
517 /* Smaller magnitude (round toward zero) */
518 rnd_type = float_round_to_zero;
519 break;
520 case 2:
521 /* Round toward +infinite */
522 rnd_type = float_round_up;
523 break;
524 default:
525 case 3:
526 /* Round toward -infinite */
527 rnd_type = float_round_down;
528 break;
530 set_float_rounding_mode(rnd_type, &env->fp_status);
533 void helper_fpscr_setbit (uint32_t bit)
535 int prev;
537 prev = (env->fpscr >> bit) & 1;
538 env->fpscr |= 1 << bit;
539 if (prev == 0) {
540 switch (bit) {
541 case FPSCR_VX:
542 env->fpscr |= 1 << FPSCR_FX;
543 if (fpscr_ve)
544 goto raise_ve;
545 case FPSCR_OX:
546 env->fpscr |= 1 << FPSCR_FX;
547 if (fpscr_oe)
548 goto raise_oe;
549 break;
550 case FPSCR_UX:
551 env->fpscr |= 1 << FPSCR_FX;
552 if (fpscr_ue)
553 goto raise_ue;
554 break;
555 case FPSCR_ZX:
556 env->fpscr |= 1 << FPSCR_FX;
557 if (fpscr_ze)
558 goto raise_ze;
559 break;
560 case FPSCR_XX:
561 env->fpscr |= 1 << FPSCR_FX;
562 if (fpscr_xe)
563 goto raise_xe;
564 break;
565 case FPSCR_VXSNAN:
566 case FPSCR_VXISI:
567 case FPSCR_VXIDI:
568 case FPSCR_VXZDZ:
569 case FPSCR_VXIMZ:
570 case FPSCR_VXVC:
571 case FPSCR_VXSOFT:
572 case FPSCR_VXSQRT:
573 case FPSCR_VXCVI:
574 env->fpscr |= 1 << FPSCR_VX;
575 env->fpscr |= 1 << FPSCR_FX;
576 if (fpscr_ve != 0)
577 goto raise_ve;
578 break;
579 case FPSCR_VE:
580 if (fpscr_vx != 0) {
581 raise_ve:
582 env->error_code = POWERPC_EXCP_FP;
583 if (fpscr_vxsnan)
584 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
585 if (fpscr_vxisi)
586 env->error_code |= POWERPC_EXCP_FP_VXISI;
587 if (fpscr_vxidi)
588 env->error_code |= POWERPC_EXCP_FP_VXIDI;
589 if (fpscr_vxzdz)
590 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
591 if (fpscr_vximz)
592 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
593 if (fpscr_vxvc)
594 env->error_code |= POWERPC_EXCP_FP_VXVC;
595 if (fpscr_vxsoft)
596 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
597 if (fpscr_vxsqrt)
598 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
599 if (fpscr_vxcvi)
600 env->error_code |= POWERPC_EXCP_FP_VXCVI;
601 goto raise_excp;
603 break;
604 case FPSCR_OE:
605 if (fpscr_ox != 0) {
606 raise_oe:
607 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
608 goto raise_excp;
610 break;
611 case FPSCR_UE:
612 if (fpscr_ux != 0) {
613 raise_ue:
614 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
615 goto raise_excp;
617 break;
618 case FPSCR_ZE:
619 if (fpscr_zx != 0) {
620 raise_ze:
621 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
622 goto raise_excp;
624 break;
625 case FPSCR_XE:
626 if (fpscr_xx != 0) {
627 raise_xe:
628 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
629 goto raise_excp;
631 break;
632 case FPSCR_RN1:
633 case FPSCR_RN:
634 fpscr_set_rounding_mode();
635 break;
636 default:
637 break;
638 raise_excp:
639 /* Update the floating-point enabled exception summary */
640 env->fpscr |= 1 << FPSCR_FEX;
641 /* We have to update Rc1 before raising the exception */
642 env->exception_index = POWERPC_EXCP_PROGRAM;
643 break;
648 void helper_store_fpscr (uint64_t arg, uint32_t mask)
651 * We use only the 32 LSB of the incoming fpr
653 uint32_t prev, new;
654 int i;
656 prev = env->fpscr;
657 new = (uint32_t)arg;
658 new &= ~0x90000000;
659 new |= prev & 0x90000000;
660 for (i = 0; i < 7; i++) {
661 if (mask & (1 << i)) {
662 env->fpscr &= ~(0xF << (4 * i));
663 env->fpscr |= new & (0xF << (4 * i));
666 /* Update VX and FEX */
667 if (fpscr_ix != 0)
668 env->fpscr |= 1 << FPSCR_VX;
669 else
670 env->fpscr &= ~(1 << FPSCR_VX);
671 if ((fpscr_ex & fpscr_eex) != 0) {
672 env->fpscr |= 1 << FPSCR_FEX;
673 env->exception_index = POWERPC_EXCP_PROGRAM;
674 /* XXX: we should compute it properly */
675 env->error_code = POWERPC_EXCP_FP;
677 else
678 env->fpscr &= ~(1 << FPSCR_FEX);
679 fpscr_set_rounding_mode();
682 void helper_float_check_status (void)
684 #ifdef CONFIG_SOFTFLOAT
685 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
686 (env->error_code & POWERPC_EXCP_FP)) {
 687         /* Deferred floating-point exception after target FPR update */
688 if (msr_fe0 != 0 || msr_fe1 != 0)
689 do_raise_exception_err(env->exception_index, env->error_code);
690 } else if (env->fp_status.float_exception_flags & float_flag_overflow) {
691 float_overflow_excp();
692 } else if (env->fp_status.float_exception_flags & float_flag_underflow) {
693 float_underflow_excp();
694 } else if (env->fp_status.float_exception_flags & float_flag_inexact) {
695 float_inexact_excp();
697 #else
698 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
699 (env->error_code & POWERPC_EXCP_FP)) {
 700         /* Deferred floating-point exception after target FPR update */
701 if (msr_fe0 != 0 || msr_fe1 != 0)
702 do_raise_exception_err(env->exception_index, env->error_code);
704 RETURN();
705 #endif
708 #ifdef CONFIG_SOFTFLOAT
709 void helper_reset_fpstatus (void)
711 env->fp_status.float_exception_flags = 0;
713 #endif
715 /* fadd - fadd. */
716 uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
718 CPU_DoubleU farg1, farg2;
720 farg1.ll = arg1;
721 farg2.ll = arg2;
722 #if USE_PRECISE_EMULATION
723 if (unlikely(float64_is_signaling_nan(farg1.d) ||
724 float64_is_signaling_nan(farg2.d))) {
725 /* sNaN addition */
726 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
727 } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
728 fpisneg(farg1.d) == fpisneg(farg2.d))) {
729 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
730 } else {
731 /* Magnitude subtraction of infinities */
 732         farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
734 #else
735 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
736 #endif
737 return farg1.ll;
740 /* fsub - fsub. */
741 uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
743 CPU_DoubleU farg1, farg2;
745 farg1.ll = arg1;
746 farg2.ll = arg2;
747 #if USE_PRECISE_EMULATION
749 if (unlikely(float64_is_signaling_nan(farg1.d) ||
750 float64_is_signaling_nan(farg2.d))) {
751 /* sNaN subtraction */
752 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
753 } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
754 fpisneg(farg1.d) != fpisneg(farg2.d))) {
755 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
756 } else {
757 /* Magnitude subtraction of infinities */
758 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
761 #else
762 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
763 #endif
764 return farg1.ll;
767 /* fmul - fmul. */
768 uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
770 CPU_DoubleU farg1, farg2;
772 farg1.ll = arg1;
773 farg2.ll = arg2;
774 #if USE_PRECISE_EMULATION
775 if (unlikely(float64_is_signaling_nan(farg1.d) ||
776 float64_is_signaling_nan(farg2.d))) {
777 /* sNaN multiplication */
778 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
779 } else if (unlikely((isinfinity(farg1.d) && iszero(farg2.d)) ||
780 (iszero(farg1.d) && isinfinity(farg2.d)))) {
781 /* Multiplication of zero by infinity */
782 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
783 } else {
784 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
787 #else
788 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
789 #endif
790 return farg1.ll;
793 /* fdiv - fdiv. */
794 uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
796 CPU_DoubleU farg1, farg2;
798 farg1.ll = arg1;
799 farg2.ll = arg2;
800 #if USE_PRECISE_EMULATION
801 if (unlikely(float64_is_signaling_nan(farg1.d) ||
802 float64_is_signaling_nan(farg2.d))) {
803 /* sNaN division */
804 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
805 } else if (unlikely(isinfinity(farg1.d) && isinfinity(farg2.d))) {
806 /* Division of infinity by infinity */
807 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
808 } else if (unlikely(iszero(farg2.d))) {
809 if (iszero(farg1.d)) {
810 /* Division of zero by zero */
 811             farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
812 } else {
813 /* Division by zero */
814 farg1.ll = float_zero_divide_excp(farg1.d, farg2.d);
816 } else {
817 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
819 #else
820 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
821 #endif
822 return farg1.ll;
825 /* fabs */
826 uint64_t helper_fabs (uint64_t arg)
828 CPU_DoubleU farg;
830 farg.ll = arg;
831 farg.d = float64_abs(farg.d);
832 return farg.ll;
835 /* fnabs */
836 uint64_t helper_fnabs (uint64_t arg)
838 CPU_DoubleU farg;
840 farg.ll = arg;
841 farg.d = float64_abs(farg.d);
842 farg.d = float64_chs(farg.d);
843 return farg.ll;
846 /* fneg */
847 uint64_t helper_fneg (uint64_t arg)
849 CPU_DoubleU farg;
851 farg.ll = arg;
852 farg.d = float64_chs(farg.d);
853 return farg.ll;
856 /* fctiw - fctiw. */
857 uint64_t helper_fctiw (uint64_t arg)
859 CPU_DoubleU farg;
860 farg.ll = arg;
862 if (unlikely(float64_is_signaling_nan(farg.d))) {
863 /* sNaN conversion */
864 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
865 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
866 /* qNan / infinity conversion */
867 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
868 } else {
869 farg.ll = float64_to_int32(farg.d, &env->fp_status);
870 #if USE_PRECISE_EMULATION
871 /* XXX: higher bits are not supposed to be significant.
872 * to make tests easier, return the same as a real PowerPC 750
874 farg.ll |= 0xFFF80000ULL << 32;
875 #endif
877 return farg.ll;
880 /* fctiwz - fctiwz. */
881 uint64_t helper_fctiwz (uint64_t arg)
883 CPU_DoubleU farg;
884 farg.ll = arg;
886 if (unlikely(float64_is_signaling_nan(farg.d))) {
887 /* sNaN conversion */
888 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
889 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
890 /* qNan / infinity conversion */
891 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
892 } else {
893 farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
894 #if USE_PRECISE_EMULATION
895 /* XXX: higher bits are not supposed to be significant.
896 * to make tests easier, return the same as a real PowerPC 750
898 farg.ll |= 0xFFF80000ULL << 32;
899 #endif
901 return farg.ll;
904 #if defined(TARGET_PPC64)
905 /* fcfid - fcfid. */
906 uint64_t helper_fcfid (uint64_t arg)
908 CPU_DoubleU farg;
909 farg.d = int64_to_float64(arg, &env->fp_status);
910 return farg.ll;
913 /* fctid - fctid. */
914 uint64_t helper_fctid (uint64_t arg)
916 CPU_DoubleU farg;
917 farg.ll = arg;
919 if (unlikely(float64_is_signaling_nan(farg.d))) {
920 /* sNaN conversion */
921 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
922 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
923 /* qNan / infinity conversion */
924 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
925 } else {
926 farg.ll = float64_to_int64(farg.d, &env->fp_status);
928 return farg.ll;
931 /* fctidz - fctidz. */
932 uint64_t helper_fctidz (uint64_t arg)
934 CPU_DoubleU farg;
935 farg.ll = arg;
937 if (unlikely(float64_is_signaling_nan(farg.d))) {
938 /* sNaN conversion */
939 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
940 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
941 /* qNan / infinity conversion */
942 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
943 } else {
944 farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
946 return farg.ll;
949 #endif
951 static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
953 CPU_DoubleU farg;
954 farg.ll = arg;
956 if (unlikely(float64_is_signaling_nan(farg.d))) {
957 /* sNaN round */
958 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
959 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
960 /* qNan / infinity round */
961 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
962 } else {
963 set_float_rounding_mode(rounding_mode, &env->fp_status);
964 farg.ll = float64_round_to_int(farg.d, &env->fp_status);
965 /* Restore rounding mode from FPSCR */
966 fpscr_set_rounding_mode();
968 return farg.ll;
971 uint64_t helper_frin (uint64_t arg)
973 return do_fri(arg, float_round_nearest_even);
976 uint64_t helper_friz (uint64_t arg)
978 return do_fri(arg, float_round_to_zero);
981 uint64_t helper_frip (uint64_t arg)
983 return do_fri(arg, float_round_up);
986 uint64_t helper_frim (uint64_t arg)
988 return do_fri(arg, float_round_down);
991 /* fmadd - fmadd. */
992 uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
994 CPU_DoubleU farg1, farg2, farg3;
996 farg1.ll = arg1;
997 farg2.ll = arg2;
998 farg3.ll = arg3;
999 #if USE_PRECISE_EMULATION
1000 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1001 float64_is_signaling_nan(farg2.d) ||
1002 float64_is_signaling_nan(farg3.d))) {
1003 /* sNaN operation */
1004 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1005 } else {
1006 #ifdef FLOAT128
1007 /* This is the way the PowerPC specification defines it */
1008 float128 ft0_128, ft1_128;
1010 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1011 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1012 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1013 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1014 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1015 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1016 #else
1017 /* This is OK on x86 hosts */
1018 farg1.d = (farg1.d * farg2.d) + farg3.d;
1019 #endif
1021 #else
1022 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1023 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1024 #endif
1025 return farg1.ll;
1028 /* fmsub - fmsub. */
1029 uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1031 CPU_DoubleU farg1, farg2, farg3;
1033 farg1.ll = arg1;
1034 farg2.ll = arg2;
1035 farg3.ll = arg3;
1036 #if USE_PRECISE_EMULATION
1037 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1038 float64_is_signaling_nan(farg2.d) ||
1039 float64_is_signaling_nan(farg3.d))) {
1040 /* sNaN operation */
1041 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1042 } else {
1043 #ifdef FLOAT128
1044 /* This is the way the PowerPC specification defines it */
1045 float128 ft0_128, ft1_128;
1047 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1048 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1049 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1050 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1051 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1052 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1053 #else
1054 /* This is OK on x86 hosts */
1055 farg1.d = (farg1.d * farg2.d) - farg3.d;
1056 #endif
1058 #else
1059 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1060 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1061 #endif
1062 return farg1.ll;
1065 /* fnmadd - fnmadd. */
1066 uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1068 CPU_DoubleU farg1, farg2, farg3;
1070 farg1.ll = arg1;
1071 farg2.ll = arg2;
1072 farg3.ll = arg3;
1074 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1075 float64_is_signaling_nan(farg2.d) ||
1076 float64_is_signaling_nan(farg3.d))) {
1077 /* sNaN operation */
1078 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1079 } else {
1080 #if USE_PRECISE_EMULATION
1081 #ifdef FLOAT128
1082 /* This is the way the PowerPC specification defines it */
1083 float128 ft0_128, ft1_128;
1085 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1086 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1087 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1088 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1089 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1090         farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1091 #else
1092 /* This is OK on x86 hosts */
1093 farg1.d = (farg1.d * farg2.d) + farg3.d;
1094 #endif
1095 #else
1096 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1097 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1098 #endif
1099 if (likely(!isnan(farg1.d)))
1100 farg1.d = float64_chs(farg1.d);
1102 return farg1.ll;
1105 /* fnmsub - fnmsub. */
1106 uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1108 CPU_DoubleU farg1, farg2, farg3;
1110 farg1.ll = arg1;
1111 farg2.ll = arg2;
1112 farg3.ll = arg3;
1114 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1115 float64_is_signaling_nan(farg2.d) ||
1116 float64_is_signaling_nan(farg3.d))) {
1117 /* sNaN operation */
1118 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1119 } else {
1120 #if USE_PRECISE_EMULATION
1121 #ifdef FLOAT128
1122 /* This is the way the PowerPC specification defines it */
1123 float128 ft0_128, ft1_128;
1125 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1126 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1127 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1128 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1129 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1130 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1131 #else
1132 /* This is OK on x86 hosts */
1133 farg1.d = (farg1.d * farg2.d) - farg3.d;
1134 #endif
1135 #else
1136 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1137 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1138 #endif
1139 if (likely(!isnan(farg1.d)))
1140 farg1.d = float64_chs(farg1.d);
1142 return farg1.ll;
1146 /* frsp - frsp. */
1147 uint64_t helper_frsp (uint64_t arg)
1149 CPU_DoubleU farg;
1150 farg.ll = arg;
1152 #if USE_PRECISE_EMULATION
1153 if (unlikely(float64_is_signaling_nan(farg.d))) {
1154         /* sNaN round to single precision */
1155         farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1156     } else {
1157         farg.d = float64_to_float32(farg.d, &env->fp_status);
1159 #else
1160 farg.d = float64_to_float32(farg.d, &env->fp_status);
1161 #endif
1162 return farg.ll;
1165 /* fsqrt - fsqrt. */
1166 uint64_t helper_fsqrt (uint64_t arg)
1168 CPU_DoubleU farg;
1169 farg.ll = arg;
1171 if (unlikely(float64_is_signaling_nan(farg.d))) {
1172 /* sNaN square root */
1173 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1174 } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
1175 /* Square root of a negative nonzero number */
1176 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1177 } else {
1178 farg.d = float64_sqrt(farg.d, &env->fp_status);
1180 return farg.ll;
1183 /* fre - fre. */
1184 uint64_t helper_fre (uint64_t arg)
1186 CPU_DoubleU farg;
1187 farg.ll = arg;
1189 if (unlikely(float64_is_signaling_nan(farg.d))) {
1190 /* sNaN reciprocal */
1191 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1192 } else if (unlikely(iszero(farg.d))) {
1193 /* Zero reciprocal */
1194 farg.ll = float_zero_divide_excp(1.0, farg.d);
1195 } else if (likely(isnormal(farg.d))) {
1196 farg.d = float64_div(1.0, farg.d, &env->fp_status);
1197 } else {
1198 if (farg.ll == 0x8000000000000000ULL) {
1199 farg.ll = 0xFFF0000000000000ULL;
1200 } else if (farg.ll == 0x0000000000000000ULL) {
1201 farg.ll = 0x7FF0000000000000ULL;
1202 } else if (isnan(farg.d)) {
1203 farg.ll = 0x7FF8000000000000ULL;
1204 } else if (fpisneg(farg.d)) {
1205 farg.ll = 0x8000000000000000ULL;
1206 } else {
1207 farg.ll = 0x0000000000000000ULL;
1210     return farg.ll;
1213 /* fres - fres. */
1214 uint64_t helper_fres (uint64_t arg)
1216 CPU_DoubleU farg;
1217 farg.ll = arg;
1219 if (unlikely(float64_is_signaling_nan(farg.d))) {
1220 /* sNaN reciprocal */
1221 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1222 } else if (unlikely(iszero(farg.d))) {
1223 /* Zero reciprocal */
1224 farg.ll = float_zero_divide_excp(1.0, farg.d);
1225 } else if (likely(isnormal(farg.d))) {
1226 #if USE_PRECISE_EMULATION
1227 farg.d = float64_div(1.0, farg.d, &env->fp_status);
1228 farg.d = float64_to_float32(farg.d, &env->fp_status);
1229 #else
1230 farg.d = float32_div(1.0, farg.d, &env->fp_status);
1231 #endif
1232 } else {
1233 if (farg.ll == 0x8000000000000000ULL) {
1234 farg.ll = 0xFFF0000000000000ULL;
1235 } else if (farg.ll == 0x0000000000000000ULL) {
1236 farg.ll = 0x7FF0000000000000ULL;
1237 } else if (isnan(farg.d)) {
1238 farg.ll = 0x7FF8000000000000ULL;
1239 } else if (fpisneg(farg.d)) {
1240 farg.ll = 0x8000000000000000ULL;
1241 } else {
1242 farg.ll = 0x0000000000000000ULL;
1245 return farg.ll;
1248 /* frsqrte - frsqrte. */
1249 uint64_t helper_frsqrte (uint64_t arg)
1251 CPU_DoubleU farg;
1252 farg.ll = arg;
1254 if (unlikely(float64_is_signaling_nan(farg.d))) {
1255 /* sNaN reciprocal square root */
1256 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1257 } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
1258 /* Reciprocal square root of a negative nonzero number */
1259 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1260 } else if (likely(isnormal(farg.d))) {
1261 farg.d = float64_sqrt(farg.d, &env->fp_status);
1262 farg.d = float32_div(1.0, farg.d, &env->fp_status);
1263 } else {
1264 if (farg.ll == 0x8000000000000000ULL) {
1265 farg.ll = 0xFFF0000000000000ULL;
1266 } else if (farg.ll == 0x0000000000000000ULL) {
1267 farg.ll = 0x7FF0000000000000ULL;
1268 } else if (isnan(farg.d)) {
1269 farg.ll |= 0x000FFFFFFFFFFFFFULL;
1270 } else if (fpisneg(farg.d)) {
1271 farg.ll = 0x7FF8000000000000ULL;
1272 } else {
1273 farg.ll = 0x0000000000000000ULL;
1276 return farg.ll;
1279 /* fsel - fsel. */
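/* fsel selects between its last two operands on the sign of the first:
 * the result is arg2 when arg1 is greater than or equal to zero (including
 * negative zero), and arg3 otherwise. */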
1280 uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1282 CPU_DoubleU farg1, farg2, farg3;
1284 farg1.ll = arg1;
1285 farg2.ll = arg2;
1286 farg3.ll = arg3;
1288 if (!fpisneg(farg1.d) || iszero(farg1.d))
1289 return farg2.ll;
1290 else
1291         return farg3.ll;
1294 uint32_t helper_fcmpu (uint64_t arg1, uint64_t arg2)
1296 CPU_DoubleU farg1, farg2;
1297 uint32_t ret = 0;
1298 farg1.ll = arg1;
1299 farg2.ll = arg2;
1301 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1302 float64_is_signaling_nan(farg2.d))) {
1303 /* sNaN comparison */
1304 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1305 } else {
1306 if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1307 ret = 0x08UL;
1308 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1309 ret = 0x04UL;
1310 } else {
1311 ret = 0x02UL;
1314 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1315 env->fpscr |= ret << FPSCR_FPRF;
1316 return ret;
1319 uint32_t helper_fcmpo (uint64_t arg1, uint64_t arg2)
1321 CPU_DoubleU farg1, farg2;
1322 uint32_t ret = 0;
1323 farg1.ll = arg1;
1324 farg2.ll = arg2;
1326 if (unlikely(float64_is_nan(farg1.d) ||
1327 float64_is_nan(farg2.d))) {
1328 if (float64_is_signaling_nan(farg1.d) ||
1329 float64_is_signaling_nan(farg2.d)) {
1330 /* sNaN comparison */
1331 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1332 POWERPC_EXCP_FP_VXVC);
1333 } else {
1334 /* qNaN comparison */
1335 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1337 } else {
1338 if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1339 ret = 0x08UL;
1340 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1341 ret = 0x04UL;
1342 } else {
1343 ret = 0x02UL;
1346 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1347 env->fpscr |= ret << FPSCR_FPRF;
1348 return ret;
1351 #if !defined (CONFIG_USER_ONLY)
1352 void cpu_dump_rfi (target_ulong RA, target_ulong msr);
1354 void do_store_msr (void)
1356 T0 = hreg_store_msr(env, T0, 0);
1357 if (T0 != 0) {
1358 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1359 do_raise_exception(T0);
1363 static always_inline void __do_rfi (target_ulong nip, target_ulong msr,
1364 target_ulong msrm, int keep_msrh)
1366 #if defined(TARGET_PPC64)
1367 if (msr & (1ULL << MSR_SF)) {
1368 nip = (uint64_t)nip;
1369 msr &= (uint64_t)msrm;
1370 } else {
1371 nip = (uint32_t)nip;
1372 msr = (uint32_t)(msr & msrm);
1373 if (keep_msrh)
1374 msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1376 #else
1377 nip = (uint32_t)nip;
1378 msr &= (uint32_t)msrm;
1379 #endif
1380 /* XXX: beware: this is false if VLE is supported */
1381 env->nip = nip & ~((target_ulong)0x00000003);
1382 hreg_store_msr(env, msr, 1);
1383 #if defined (DEBUG_OP)
1384 cpu_dump_rfi(env->nip, env->msr);
1385 #endif
1386 /* No need to raise an exception here,
1387 * as rfi is always the last insn of a TB
1389 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1392 void do_rfi (void)
1394 __do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1395 ~((target_ulong)0xFFFF0000), 1);
1398 #if defined(TARGET_PPC64)
1399 void do_rfid (void)
1401 __do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1402 ~((target_ulong)0xFFFF0000), 0);
1405 void do_hrfid (void)
1407 __do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1408 ~((target_ulong)0xFFFF0000), 0);
1410 #endif
1411 #endif
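/* The trap helpers test the TO field passed in 'flags':
 *   0x10 less-than (signed)      0x08 greater-than (signed)
 *   0x04 equal                   0x02 less-than (unsigned)
 *   0x01 greater-than (unsigned)
 * A program interrupt with the TRAP reason is raised if any selected
 * condition holds. */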
1413 void do_tw (int flags)
1415 if (!likely(!(((int32_t)T0 < (int32_t)T1 && (flags & 0x10)) ||
1416 ((int32_t)T0 > (int32_t)T1 && (flags & 0x08)) ||
1417 ((int32_t)T0 == (int32_t)T1 && (flags & 0x04)) ||
1418 ((uint32_t)T0 < (uint32_t)T1 && (flags & 0x02)) ||
1419 ((uint32_t)T0 > (uint32_t)T1 && (flags & 0x01))))) {
1420 do_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1424 #if defined(TARGET_PPC64)
1425 void do_td (int flags)
1427 if (!likely(!(((int64_t)T0 < (int64_t)T1 && (flags & 0x10)) ||
1428 ((int64_t)T0 > (int64_t)T1 && (flags & 0x08)) ||
1429 ((int64_t)T0 == (int64_t)T1 && (flags & 0x04)) ||
1430 ((uint64_t)T0 < (uint64_t)T1 && (flags & 0x02)) ||
1431 ((uint64_t)T0 > (uint64_t)T1 && (flags & 0x01)))))
1432 do_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1434 #endif
1436 /*****************************************************************************/
1437 /* PowerPC 601 specific instructions (POWER bridge) */
1438 void do_POWER_abso (void)
1440 if ((int32_t)T0 == INT32_MIN) {
1441 T0 = INT32_MAX;
1442 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1443 } else if ((int32_t)T0 < 0) {
1444 T0 = -T0;
1445 env->xer &= ~(1 << XER_OV);
1446 } else {
1447 env->xer &= ~(1 << XER_OV);
1451 void do_POWER_clcs (void)
1453 switch (T0) {
1454 case 0x0CUL:
1455 /* Instruction cache line size */
1456 T0 = env->icache_line_size;
1457 break;
1458 case 0x0DUL:
1459 /* Data cache line size */
1460 T0 = env->dcache_line_size;
1461 break;
1462 case 0x0EUL:
1463 /* Minimum cache line size */
1464 T0 = env->icache_line_size < env->dcache_line_size ?
1465 env->icache_line_size : env->dcache_line_size;
1466 break;
1467 case 0x0FUL:
1468 /* Maximum cache line size */
1469 T0 = env->icache_line_size > env->dcache_line_size ?
1470 env->icache_line_size : env->dcache_line_size;
1471 break;
1472 default:
1473 /* Undefined */
1474 break;
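/* do_POWER_div and do_POWER_divo use the MQ register: the 64-bit dividend
 * is T0 (high word) concatenated with MQ (low word); the quotient is
 * returned in T0 and the remainder left in MQ. do_POWER_divs/divso divide
 * T0 alone but still leave the remainder in MQ. Overflow or a zero divisor
 * yields a sign-filled quotient and MQ = 0, per the code below. */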
1478 void do_POWER_div (void)
1480 uint64_t tmp;
1482 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1483 (int32_t)T1 == 0) {
1484 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1485 env->spr[SPR_MQ] = 0;
1486 } else {
1487 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1488 env->spr[SPR_MQ] = tmp % T1;
1489 T0 = tmp / (int32_t)T1;
1493 void do_POWER_divo (void)
1495 int64_t tmp;
1497 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1498 (int32_t)T1 == 0) {
1499 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1500 env->spr[SPR_MQ] = 0;
1501 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1502 } else {
1503 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1504 env->spr[SPR_MQ] = tmp % T1;
1505 tmp /= (int32_t)T1;
1506 if (tmp > (int64_t)INT32_MAX || tmp < (int64_t)INT32_MIN) {
1507 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1508 } else {
1509 env->xer &= ~(1 << XER_OV);
1511 T0 = tmp;
1515 void do_POWER_divs (void)
1517 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1518 (int32_t)T1 == 0) {
1519 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1520 env->spr[SPR_MQ] = 0;
1521 } else {
1522 env->spr[SPR_MQ] = T0 % T1;
1523 T0 = (int32_t)T0 / (int32_t)T1;
1527 void do_POWER_divso (void)
1529 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1530 (int32_t)T1 == 0) {
1531 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1532 env->spr[SPR_MQ] = 0;
1533 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1534 } else {
1535 T0 = (int32_t)T0 / (int32_t)T1;
1536 env->spr[SPR_MQ] = (int32_t)T0 % (int32_t)T1;
1537 env->xer &= ~(1 << XER_OV);
1541 void do_POWER_dozo (void)
1543 if ((int32_t)T1 > (int32_t)T0) {
1544 T2 = T0;
1545 T0 = T1 - T0;
1546 if (((uint32_t)(~T2) ^ (uint32_t)T1 ^ UINT32_MAX) &
1547 ((uint32_t)(~T2) ^ (uint32_t)T0) & (1UL << 31)) {
1548 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1549 } else {
1550 env->xer &= ~(1 << XER_OV);
1552 } else {
1553 T0 = 0;
1554 env->xer &= ~(1 << XER_OV);
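/* maskg builds a mask of consecutive 1 bits running from bit position T0
 * to bit position T1 (IBM bit numbering, bit 0 is the MSB); when T0 > T1
 * the mask wraps around, which the code below expresses by complementing
 * the straight mask. */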
1558 void do_POWER_maskg (void)
1560 uint32_t ret;
1562 if ((uint32_t)T0 == (uint32_t)(T1 + 1)) {
1563 ret = UINT32_MAX;
1564 } else {
1565 ret = (UINT32_MAX >> ((uint32_t)T0)) ^
1566 ((UINT32_MAX >> ((uint32_t)T1)) >> 1);
1567 if ((uint32_t)T0 > (uint32_t)T1)
1568 ret = ~ret;
1570 T0 = ret;
1573 void do_POWER_mulo (void)
1575 uint64_t tmp;
1577 tmp = (uint64_t)T0 * (uint64_t)T1;
1578 env->spr[SPR_MQ] = tmp >> 32;
1579 T0 = tmp;
1580 if (tmp >> 32 != ((uint64_t)T0 >> 16) * ((uint64_t)T1 >> 16)) {
1581 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1582 } else {
1583 env->xer &= ~(1 << XER_OV);
1587 #if !defined (CONFIG_USER_ONLY)
1588 void do_POWER_rac (void)
1590 mmu_ctx_t ctx;
1591 int nb_BATs;
1593 /* We don't have to generate many instances of this instruction,
1594 * as rac is supervisor only.
1596 /* XXX: FIX THIS: Pretend we have no BAT */
1597 nb_BATs = env->nb_BATs;
1598 env->nb_BATs = 0;
1599 if (get_physical_address(env, &ctx, T0, 0, ACCESS_INT) == 0)
1600 T0 = ctx.raddr;
1601 env->nb_BATs = nb_BATs;
1604 void do_POWER_rfsvc (void)
1606 __do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1609 void do_store_hid0_601 (void)
1611 uint32_t hid0;
1613 hid0 = env->spr[SPR_HID0];
1614 if ((T0 ^ hid0) & 0x00000008) {
1615 /* Change current endianness */
1616 env->hflags &= ~(1 << MSR_LE);
1617 env->hflags_nmsr &= ~(1 << MSR_LE);
1618 env->hflags_nmsr |= (1 << MSR_LE) & (((T0 >> 3) & 1) << MSR_LE);
1619 env->hflags |= env->hflags_nmsr;
1620 if (loglevel != 0) {
1621 fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
1622 __func__, T0 & 0x8 ? 'l' : 'b', env->hflags);
1625 env->spr[SPR_HID0] = T0;
1627 #endif
1629 /*****************************************************************************/
1630 /* 602 specific instructions */
1631 /* mfrom is the craziest instruction ever seen, imho! */
1632 /* Real implementation uses a ROM table. Do the same */
1633 #define USE_MFROM_ROM_TABLE
1634 void do_op_602_mfrom (void)
1636 if (likely(T0 < 602)) {
1637 #if defined(USE_MFROM_ROM_TABLE)
1638 #include "mfrom_table.c"
1639 T0 = mfrom_ROM_table[T0];
1640 #else
1641 double d;
1642         /* Extremely decomposed:
1643          *     T0 = 256 * log10(10^(-T0 / 256) + 1.0) + 0.5
1644          */
1646 d = T0;
1647 d = float64_div(d, 256, &env->fp_status);
1648 d = float64_chs(d);
1649 d = exp10(d); // XXX: use float emulation function
1650 d = float64_add(d, 1.0, &env->fp_status);
1651 d = log10(d); // XXX: use float emulation function
1652 d = float64_mul(d, 256, &env->fp_status);
1653 d = float64_add(d, 0.5, &env->fp_status);
1654 T0 = float64_round_to_int(d, &env->fp_status);
1655 #endif
1656 } else {
1657 T0 = 0;
1661 /*****************************************************************************/
1662 /* Embedded PowerPC specific helpers */
1664 /* XXX: to be improved to check access rights when in user-mode */
1665 void do_load_dcr (void)
1667 target_ulong val;
1669 if (unlikely(env->dcr_env == NULL)) {
1670 if (loglevel != 0) {
1671 fprintf(logfile, "No DCR environment\n");
1673 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1674 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1675 } else if (unlikely(ppc_dcr_read(env->dcr_env, T0, &val) != 0)) {
1676 if (loglevel != 0) {
1677 fprintf(logfile, "DCR read error %d %03x\n", (int)T0, (int)T0);
1679 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1680 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1681 } else {
1682 T0 = val;
1686 void do_store_dcr (void)
1688 if (unlikely(env->dcr_env == NULL)) {
1689 if (loglevel != 0) {
1690 fprintf(logfile, "No DCR environment\n");
1692 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1693 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1694 } else if (unlikely(ppc_dcr_write(env->dcr_env, T0, T1) != 0)) {
1695 if (loglevel != 0) {
1696 fprintf(logfile, "DCR write error %d %03x\n", (int)T0, (int)T0);
1698 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1699 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1703 #if !defined(CONFIG_USER_ONLY)
1704 void do_40x_rfci (void)
1706 __do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1707 ~((target_ulong)0xFFFF0000), 0);
1710 void do_rfci (void)
1712     __do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
1713 ~((target_ulong)0x3FFF0000), 0);
1716 void do_rfdi (void)
1718     __do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
1719 ~((target_ulong)0x3FFF0000), 0);
1722 void do_rfmci (void)
1724     __do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
1725 ~((target_ulong)0x3FFF0000), 0);
1728 void do_load_403_pb (int num)
1730 T0 = env->pb[num];
1733 void do_store_403_pb (int num)
1735 if (likely(env->pb[num] != T0)) {
1736 env->pb[num] = T0;
1737 /* Should be optimized */
1738 tlb_flush(env, 1);
1741 #endif
1743 /* 440 specific */
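/* dlmzb (determine leftmost zero byte) scans the eight bytes of T0:T1,
 * most-significant byte first, and returns in T0 the 1-based index of the
 * first zero byte (9 if none is found). */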
1744 void do_440_dlmzb (void)
1746 target_ulong mask;
1747 int i;
1749 i = 1;
1750 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1751 if ((T0 & mask) == 0)
1752 goto done;
1753 i++;
1755 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1756 if ((T1 & mask) == 0)
1757 break;
1758 i++;
1760 done:
1761 T0 = i;
1764 /* SPE extension helpers */
1765 /* Use a table to make this quicker */
1766 static uint8_t hbrev[16] = {
1767 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
1768 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
1771 static always_inline uint8_t byte_reverse (uint8_t val)
1773 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
1776 static always_inline uint32_t word_reverse (uint32_t val)
1778 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
1779 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
1782 #define MASKBITS 16 // Random value - to be fixed (implementation dependent)
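/* brinc performs a bit-reversed increment, as used for FFT-style
 * bit-reversed addressing: within the mask supplied by arg2, the index bits
 * of arg1 are reversed, incremented and reversed back; bits outside the
 * mask are passed through from arg1 unchanged. */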
1783 target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
1785 uint32_t a, b, d, mask;
1787 mask = UINT32_MAX >> (32 - MASKBITS);
1788 a = arg1 & mask;
1789 b = arg2 & mask;
1790 d = word_reverse(1 + word_reverse(a | ~b));
1791 return (arg1 & ~mask) | (d & b);
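/* cntlsw32 counts leading sign bits: the number of leading bits equal to
 * bit 31 of the operand. */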
1794 uint32_t helper_cntlsw32 (uint32_t val)
1796 if (val & 0x80000000)
1797 return clz32(~val);
1798 else
1799 return clz32(val);
1802 uint32_t helper_cntlzw32 (uint32_t val)
1804 return clz32(val);
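/* The DO_SPE_OP macros apply a 32-bit scalar helper independently to the
 * upper and lower halves of the 64-bit SPE register(s) and repack the two
 * results into the destination. */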
1807 #define DO_SPE_OP1(name) \
1808 void do_ev##name (void) \
1810 T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32) << 32) | \
1811 (uint64_t)_do_e##name(T0_64); \
1814 #define DO_SPE_OP2(name) \
1815 void do_ev##name (void) \
1817 T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32, T1_64 >> 32) << 32) | \
1818 (uint64_t)_do_e##name(T0_64, T1_64); \
1821 /* Fixed-point vector comparisons */
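/* The vector comparisons merge the two per-element results with
 * _do_evcmp_merge below, which builds a CR-style 4-bit field:
 * bit 3 = high-element result, bit 2 = low-element result,
 * bit 1 = OR of both, bit 0 = AND of both. */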
1822 #define DO_SPE_CMP(name) \
1823 void do_ev##name (void) \
1825 T0 = _do_evcmp_merge((uint64_t)_do_e##name(T0_64 >> 32, \
1826 T1_64 >> 32) << 32, \
1827 _do_e##name(T0_64, T1_64)); \
1830 static always_inline uint32_t _do_evcmp_merge (int t0, int t1)
1832 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
1835 /* Single precision floating-point conversions from/to integer */
1836 static always_inline uint32_t _do_efscfsi (int32_t val)
1838 CPU_FloatU u;
1840 u.f = int32_to_float32(val, &env->spe_status);
1842 return u.l;
1845 static always_inline uint32_t _do_efscfui (uint32_t val)
1847 CPU_FloatU u;
1849 u.f = uint32_to_float32(val, &env->spe_status);
1851 return u.l;
1854 static always_inline int32_t _do_efsctsi (uint32_t val)
1856 CPU_FloatU u;
1858 u.l = val;
1859 /* NaN are not treated the same way IEEE 754 does */
1860 if (unlikely(isnan(u.f)))
1861 return 0;
1863 return float32_to_int32(u.f, &env->spe_status);
1866 static always_inline uint32_t _do_efsctui (uint32_t val)
1868 CPU_FloatU u;
1870 u.l = val;
1871 /* NaN are not treated the same way IEEE 754 does */
1872 if (unlikely(isnan(u.f)))
1873 return 0;
1875 return float32_to_uint32(u.f, &env->spe_status);
1878 static always_inline int32_t _do_efsctsiz (uint32_t val)
1880 CPU_FloatU u;
1882 u.l = val;
1883 /* NaN are not treated the same way IEEE 754 does */
1884 if (unlikely(isnan(u.f)))
1885 return 0;
1887 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
1890 static always_inline uint32_t _do_efsctuiz (uint32_t val)
1892 CPU_FloatU u;
1894 u.l = val;
1895 /* NaN are not treated the same way IEEE 754 does */
1896 if (unlikely(isnan(u.f)))
1897 return 0;
1899 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
1902 void do_efscfsi (void)
1904 T0_64 = _do_efscfsi(T0_64);
1907 void do_efscfui (void)
1909 T0_64 = _do_efscfui(T0_64);
1912 void do_efsctsi (void)
1914 T0_64 = _do_efsctsi(T0_64);
1917 void do_efsctui (void)
1919 T0_64 = _do_efsctui(T0_64);
1922 void do_efsctsiz (void)
1924 T0_64 = _do_efsctsiz(T0_64);
1927 void do_efsctuiz (void)
1929 T0_64 = _do_efsctuiz(T0_64);
1932 /* Single precision floating-point conversion to/from fractional */
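/* The fractional conversions below treat a 32-bit fractional operand as
 * value / 2^32: converting from fractional divides by 2^32 (built via
 * int64_to_float32(1ULL << 32)), and converting to fractional multiplies
 * by it. The same scale is used for the signed and unsigned variants. */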
1933 static always_inline uint32_t _do_efscfsf (uint32_t val)
1935 CPU_FloatU u;
1936 float32 tmp;
1938 u.f = int32_to_float32(val, &env->spe_status);
1939 tmp = int64_to_float32(1ULL << 32, &env->spe_status);
1940 u.f = float32_div(u.f, tmp, &env->spe_status);
1942 return u.l;
1945 static always_inline uint32_t _do_efscfuf (uint32_t val)
1947 CPU_FloatU u;
1948 float32 tmp;
1950 u.f = uint32_to_float32(val, &env->spe_status);
1951 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
1952 u.f = float32_div(u.f, tmp, &env->spe_status);
1954 return u.l;
1957 static always_inline int32_t _do_efsctsf (uint32_t val)
1959 CPU_FloatU u;
1960 float32 tmp;
1962 u.l = val;
1963 /* NaN are not treated the same way IEEE 754 does */
1964 if (unlikely(isnan(u.f)))
1965 return 0;
1966 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
1967 u.f = float32_mul(u.f, tmp, &env->spe_status);
1969 return float32_to_int32(u.f, &env->spe_status);
1972 static always_inline uint32_t _do_efsctuf (uint32_t val)
1974 CPU_FloatU u;
1975 float32 tmp;
1977 u.l = val;
1978 /* NaN are not treated the same way IEEE 754 does */
1979 if (unlikely(isnan(u.f)))
1980 return 0;
1981 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
1982 u.f = float32_mul(u.f, tmp, &env->spe_status);
1984 return float32_to_uint32(u.f, &env->spe_status);
1987 static always_inline int32_t _do_efsctsfz (uint32_t val)
1989 CPU_FloatU u;
1990 float32 tmp;
1992 u.l = val;
1993 /* NaN are not treated the same way IEEE 754 does */
1994 if (unlikely(isnan(u.f)))
1995 return 0;
1996 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
1997 u.f = float32_mul(u.f, tmp, &env->spe_status);
1999 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
2002 static always_inline uint32_t _do_efsctufz (uint32_t val)
2004 CPU_FloatU u;
2005 float32 tmp;
2007 u.l = val;
2008 /* NaN are not treated the same way IEEE 754 does */
2009 if (unlikely(isnan(u.f)))
2010 return 0;
2011 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2012 u.f = float32_mul(u.f, tmp, &env->spe_status);
2014 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
2017 void do_efscfsf (void)
2019 T0_64 = _do_efscfsf(T0_64);
2022 void do_efscfuf (void)
2024 T0_64 = _do_efscfuf(T0_64);
2027 void do_efsctsf (void)
2029 T0_64 = _do_efsctsf(T0_64);
2032 void do_efsctuf (void)
2034 T0_64 = _do_efsctuf(T0_64);
2037 void do_efsctsfz (void)
2039 T0_64 = _do_efsctsfz(T0_64);
2042 void do_efsctufz (void)
2044 T0_64 = _do_efsctufz(T0_64);
2047 /* Double precision floating point helpers */
2048 static always_inline int _do_efdcmplt (uint64_t op1, uint64_t op2)
2050 /* XXX: TODO: test special values (NaN, infinites, ...) */
2051 return _do_efdtstlt(op1, op2);
2054 static always_inline int _do_efdcmpgt (uint64_t op1, uint64_t op2)
2056 /* XXX: TODO: test special values (NaN, infinites, ...) */
2057 return _do_efdtstgt(op1, op2);
2060 static always_inline int _do_efdcmpeq (uint64_t op1, uint64_t op2)
2062 /* XXX: TODO: test special values (NaN, infinites, ...) */
2063 return _do_efdtsteq(op1, op2);
2066 void do_efdcmplt (void)
2068 T0 = _do_efdcmplt(T0_64, T1_64);
2071 void do_efdcmpgt (void)
2073 T0 = _do_efdcmpgt(T0_64, T1_64);
2076 void do_efdcmpeq (void)
2078 T0 = _do_efdcmpeq(T0_64, T1_64);
2081 /* Double precision floating-point conversion to/from integer */
2082 static always_inline uint64_t _do_efdcfsi (int64_t val)
2084 CPU_DoubleU u;
2086 u.d = int64_to_float64(val, &env->spe_status);
2088 return u.ll;
2091 static always_inline uint64_t _do_efdcfui (uint64_t val)
2093 CPU_DoubleU u;
2095 u.d = uint64_to_float64(val, &env->spe_status);
2097 return u.ll;
2100 static always_inline int64_t _do_efdctsi (uint64_t val)
2102 CPU_DoubleU u;
2104 u.ll = val;
2105 /* NaN are not treated the same way IEEE 754 does */
2106 if (unlikely(isnan(u.d)))
2107 return 0;
2109 return float64_to_int64(u.d, &env->spe_status);
2112 static always_inline uint64_t _do_efdctui (uint64_t val)
2114 CPU_DoubleU u;
2116 u.ll = val;
2117 /* NaN are not treated the same way IEEE 754 does */
2118 if (unlikely(isnan(u.d)))
2119 return 0;
2121 return float64_to_uint64(u.d, &env->spe_status);
2124 static always_inline int64_t _do_efdctsiz (uint64_t val)
2126 CPU_DoubleU u;
2128 u.ll = val;
2129 /* NaN are not treated the same way IEEE 754 does */
2130 if (unlikely(isnan(u.d)))
2131 return 0;
2133 return float64_to_int64_round_to_zero(u.d, &env->spe_status);
2136 static always_inline uint64_t _do_efdctuiz (uint64_t val)
2138 CPU_DoubleU u;
2140 u.ll = val;
2141 /* NaN are not treated the same way IEEE 754 does */
2142 if (unlikely(isnan(u.d)))
2143 return 0;
2145 return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
2148 void do_efdcfsi (void)
2150 T0_64 = _do_efdcfsi(T0_64);
2153 void do_efdcfui (void)
2155 T0_64 = _do_efdcfui(T0_64);
2158 void do_efdctsi (void)
2160 T0_64 = _do_efdctsi(T0_64);
2163 void do_efdctui (void)
2165 T0_64 = _do_efdctui(T0_64);
2168 void do_efdctsiz (void)
2170 T0_64 = _do_efdctsiz(T0_64);
2173 void do_efdctuiz (void)
2175 T0_64 = _do_efdctuiz(T0_64);
2178 /* Double precision floating-point conversion to/from fractional */
static always_inline uint64_t _do_efdcfsf (int64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->spe_status);
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_div(u.d, tmp, &env->spe_status);

    return u.ll;
}

static always_inline uint64_t _do_efdcfuf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->spe_status);
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_div(u.d, tmp, &env->spe_status);

    return u.ll;
}

static always_inline int64_t _do_efdctsf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies; they convert to 0 */
    if (unlikely(isnan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_mul(u.d, tmp, &env->spe_status);

    return float64_to_int32(u.d, &env->spe_status);
}

static always_inline uint64_t _do_efdctuf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies; they convert to 0 */
    if (unlikely(isnan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_mul(u.d, tmp, &env->spe_status);

    return float64_to_uint32(u.d, &env->spe_status);
}

static always_inline int64_t _do_efdctsfz (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies; they convert to 0 */
    if (unlikely(isnan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_mul(u.d, tmp, &env->spe_status);

    return float64_to_int32_round_to_zero(u.d, &env->spe_status);
}

static always_inline uint64_t _do_efdctufz (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies; they convert to 0 */
    if (unlikely(isnan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_mul(u.d, tmp, &env->spe_status);

    return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
}

void do_efdcfsf (void)
{
    T0_64 = _do_efdcfsf(T0_64);
}

void do_efdcfuf (void)
{
    T0_64 = _do_efdcfuf(T0_64);
}

void do_efdctsf (void)
{
    T0_64 = _do_efdctsf(T0_64);
}

void do_efdctuf (void)
{
    T0_64 = _do_efdctuf(T0_64);
}

void do_efdctsfz (void)
{
    T0_64 = _do_efdctsfz(T0_64);
}

void do_efdctufz (void)
{
    T0_64 = _do_efdctufz(T0_64);
}

/* Floating point conversion between single and double precision */
static always_inline uint32_t _do_efscfd (uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->spe_status);

    return u2.l;
}

static always_inline uint64_t _do_efdcfs (uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->spe_status);

    return u2.ll;
}

void do_efscfd (void)
{
    T0_64 = _do_efscfd(T0_64);
}

void do_efdcfs (void)
{
    T0_64 = _do_efdcfs(T0_64);
}

/* Single precision floating-point vector arithmetic */
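/* DO_SPE_OP1/DO_SPE_OP2 are helper-generating macros defined earlier in
 * this file; they are expected to apply the scalar _do_e<op> helper to each
 * 32-bit half of the 64-bit operand(s) to build the ev* vector variants. */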
/* evfsabs */
DO_SPE_OP1(fsabs);
/* evfsnabs */
DO_SPE_OP1(fsnabs);
/* evfsneg */
DO_SPE_OP1(fsneg);
/* evfsadd */
DO_SPE_OP2(fsadd);
/* evfssub */
DO_SPE_OP2(fssub);
/* evfsmul */
DO_SPE_OP2(fsmul);
/* evfsdiv */
DO_SPE_OP2(fsdiv);

/* Single-precision floating-point comparisons */
static always_inline int _do_efscmplt (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return _do_efststlt(op1, op2);
}

static always_inline int _do_efscmpgt (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return _do_efststgt(op1, op2);
}

static always_inline int _do_efscmpeq (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return _do_efststeq(op1, op2);
}

void do_efscmplt (void)
{
    T0 = _do_efscmplt(T0_64, T1_64);
}

void do_efscmpgt (void)
{
    T0 = _do_efscmpgt(T0_64, T1_64);
}

void do_efscmpeq (void)
{
    T0 = _do_efscmpeq(T0_64, T1_64);
}

/* Single-precision floating-point vector comparisons */
/* evfscmplt */
DO_SPE_CMP(fscmplt);
/* evfscmpgt */
DO_SPE_CMP(fscmpgt);
/* evfscmpeq */
DO_SPE_CMP(fscmpeq);
/* evfststlt */
DO_SPE_CMP(fststlt);
/* evfststgt */
DO_SPE_CMP(fststgt);
/* evfststeq */
DO_SPE_CMP(fststeq);

/* Single-precision floating-point vector conversions */
/* evfscfsi */
DO_SPE_OP1(fscfsi);
/* evfscfui */
DO_SPE_OP1(fscfui);
/* evfscfuf */
DO_SPE_OP1(fscfuf);
/* evfscfsf */
DO_SPE_OP1(fscfsf);
/* evfsctsi */
DO_SPE_OP1(fsctsi);
/* evfsctui */
DO_SPE_OP1(fsctui);
/* evfsctsiz */
DO_SPE_OP1(fsctsiz);
/* evfsctuiz */
DO_SPE_OP1(fsctuiz);
/* evfsctsf */
DO_SPE_OP1(fsctsf);
/* evfsctuf */
DO_SPE_OP1(fsctuf);

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

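/* Each SHIFT value above instantiates the slow-path load/store helpers from
 * the generic softmmu template for one access size (1 << SHIFT bytes, i.e.
 * byte, halfword, word and doubleword). */
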
/* Try to fill the TLB and raise an exception on error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real CPU fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we
                   have a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        do_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Software driven TLB management */
/* PowerPC 602/603 software TLB load instructions helpers */
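/* On a software TLB miss, the 602/603 latches the faulting effective
 * address in IMISS/DMISS and the PTE compare word in ICMP/DCMP; the miss
 * handler is expected to load RPA with the matching PTE1 before executing
 * tlbli/tlbld, and an SRR1 bit selects which way to replace. */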
void do_load_6xx_tlb (int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
                " PTE1 " ADDRX " way %d\n",
                __func__, T0, EPN, CMP, RPN, way);
    }
#endif
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(T0 & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void do_load_74xx_tlb (int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
                " PTE1 " ADDRX " way %d\n",
                __func__, T0, EPN, CMP, RPN, way);
    }
#endif
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(T0 & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

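/* BookE TLB entries encode the page size as a small field: the page size is
 * 1 KB shifted left by twice the field value (1 KB, 4 KB, 16 KB, ... up to
 * 1 TB on 64-bit targets). The two helpers below convert in both
 * directions; booke_page_size_to_tlb() returns -1 for unsupported sizes. */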
static always_inline target_ulong booke_tlb_to_page_size (int size)
{
    return 1024 << (2 * size);
}

static always_inline int booke_page_size_to_tlb (target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}

/* Helpers for 4xx TLB management */
void do_4xx_tlbre_lo (void)
{
    ppcemb_tlb_t *tlb;
    int size;

    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    T0 = tlb->EPN;
    if (tlb->prot & PAGE_VALID)
        T0 |= 0x400;
    size = booke_page_size_to_tlb(tlb->size);
    if (size < 0 || size > 0x7)
        size = 1;
    T0 |= size << 7;
    env->spr[SPR_40x_PID] = tlb->PID;
}

void do_4xx_tlbre_hi (void)
{
    ppcemb_tlb_t *tlb;

    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    T0 = tlb->RPN;
    if (tlb->prot & PAGE_EXEC)
        T0 |= 0x200;
    if (tlb->prot & PAGE_WRITE)
        T0 |= 0x100;
}

void do_4xx_tlbwe_hi (void)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s T0 " TDX " T1 " TDX "\n", __func__, T0, T1);
    }
#endif
    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
#if defined (DEBUG_SOFTWARE_TLB)
        if (loglevel != 0) {
            fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
                    " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
        }
#endif
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
    tlb->size = booke_tlb_to_page_size((T1 >> 7) & 0x7);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one.
     */
    if ((T1 & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "is not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((T1 >> 7) & 0x7));
    }
    tlb->EPN = T1 & ~(tlb->size - 1);
    if (T1 & 0x40)
        tlb->prot |= PAGE_VALID;
    else
        tlb->prot &= ~PAGE_VALID;
    if (T1 & 0x20) {
        /* XXX: TO BE FIXED */
        cpu_abort(env, "Little-endian TLB entries are not supported for now\n");
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    tlb->attr = T1 & 0xFF;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)T0, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    }
#endif
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
#if defined (DEBUG_SOFTWARE_TLB)
        if (loglevel != 0) {
            fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
                    " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
        }
#endif
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
}

void do_4xx_tlbwe_lo (void)
{
    ppcemb_tlb_t *tlb;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s T0 " TDX " T1 " TDX "\n", __func__, T0, T1);
    }
#endif
    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    tlb->RPN = T1 & 0xFFFFFC00;
    tlb->prot = PAGE_READ;
    if (T1 & 0x200)
        tlb->prot |= PAGE_EXEC;
    if (T1 & 0x100)
        tlb->prot |= PAGE_WRITE;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)T0, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    }
#endif
}

/* PowerPC 440 TLB management */
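/* A 440 TLB entry is accessed one 32-bit word at a time, selected by the
 * "word" argument of the helpers below: word 0 carries the EPN, page size
 * and valid bit, word 1 the RPN, and word 2 the storage attributes plus the
 * two sets of access-permission bits. */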
void do_440_tlbwe (int word)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s word %d T0 " TDX " T1 " TDX "\n",
                __func__, word, T0, T1);
    }
#endif
    do_flush_tlbs = 0;
    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = T1 & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((T1 >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (T1 >> 8) & 1;
        if (T1 & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = T1 & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (T1 & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (T1 & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (T1 & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (T1 & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (T1 & 0x8)
            tlb->prot |= PAGE_READ;
        if (T1 & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (T1 & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}

void do_440_tlbre (int word)
{
    ppcemb_tlb_t *tlb;
    int size;

    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        T0 = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;
        T0 |= size << 4;
        if (tlb->attr & 0x1)
            T0 |= 0x100;
        if (tlb->prot & PAGE_VALID)
            T0 |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        T0 = tlb->RPN;
        break;
    case 2:
        T0 = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            T0 |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            T0 |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            T0 |= 0x4;
        if (tlb->prot & PAGE_READ)
            T0 |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            T0 |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            T0 |= 0x20;
        break;
    }
}

#endif /* !CONFIG_USER_ONLY */