[qemu-kvm/fedora.git] / target-ppc / op_helper.c
blob de6369a9bbe53047bc16e4b176d38d8230843f91
1 /*
2 * PowerPC emulation helpers for qemu.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "exec.h"
21 #include "host-utils.h"
22 #include "helper.h"
24 #include "helper_regs.h"
25 #include "op_helper.h"
27 //#define DEBUG_OP
28 //#define DEBUG_EXCEPTIONS
29 //#define DEBUG_SOFTWARE_TLB
31 /*****************************************************************************/
32 /* Exceptions processing helpers */
34 void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
36 raise_exception_err(env, exception, error_code);
39 void helper_raise_debug (void)
41 raise_exception(env, EXCP_DEBUG);
44 /*****************************************************************************/
45 /* Registers load and stores */
46 target_ulong helper_load_cr (void)
48 return (env->crf[0] << 28) |
49 (env->crf[1] << 24) |
50 (env->crf[2] << 20) |
51 (env->crf[3] << 16) |
52 (env->crf[4] << 12) |
53 (env->crf[5] << 8) |
54 (env->crf[6] << 4) |
55 (env->crf[7] << 0);
58 void helper_store_cr (target_ulong val, uint32_t mask)
60 int i, sh;
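/* mask is the 8-bit CRM field of mtcrf: bit 7 (0x80) selects CR0
 * (env->crf[0], written from val bits 31:28), down to bit 0 (0x01)
 * for CR7 (val bits 3:0). */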
62 for (i = 0, sh = 7; i < 8; i++, sh--) {
63 if (mask & (1 << sh))
64 env->crf[i] = (val >> (sh * 4)) & 0xFUL;
68 #if defined(TARGET_PPC64)
69 void do_store_pri (int prio)
71 env->spr[SPR_PPR] &= ~0x001C000000000000ULL;
72 env->spr[SPR_PPR] |= ((uint64_t)prio & 0x7) << 50;
74 #endif
76 target_ulong ppc_load_dump_spr (int sprn)
78 if (loglevel != 0) {
79 fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
80 sprn, sprn, env->spr[sprn]);
83 return env->spr[sprn];
86 void ppc_store_dump_spr (int sprn, target_ulong val)
88 if (loglevel != 0) {
89 fprintf(logfile, "Write SPR %d %03x => " ADDRX " <= " ADDRX "\n",
90 sprn, sprn, env->spr[sprn], val);
92 env->spr[sprn] = val;
95 /*****************************************************************************/
96 /* Memory load and stores */
98 static always_inline target_ulong get_addr(target_ulong addr)
100 #if defined(TARGET_PPC64)
101 if (msr_sf)
102 return addr;
103 else
104 #endif
105 return (uint32_t)addr;
108 void helper_lmw (target_ulong addr, uint32_t reg)
110 for (; reg < 32; reg++, addr += 4) {
111 if (msr_le)
112 env->gpr[reg] = bswap32(ldl(get_addr(addr)));
113 else
114 env->gpr[reg] = ldl(get_addr(addr));
118 void helper_stmw (target_ulong addr, uint32_t reg)
120 for (; reg < 32; reg++, addr += 4) {
121 if (msr_le)
122 stl(get_addr(addr), bswap32((uint32_t)env->gpr[reg]));
123 else
124 stl(get_addr(addr), (uint32_t)env->gpr[reg]);
128 void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
130 int sh;
131 for (; nb > 3; nb -= 4, addr += 4) {
132 env->gpr[reg] = ldl(get_addr(addr));
133 reg = (reg + 1) % 32;
135 if (unlikely(nb > 0)) {
136 env->gpr[reg] = 0;
137 for (sh = 24; nb > 0; nb--, addr++, sh -= 8) {
138 env->gpr[reg] |= ldub(get_addr(addr)) << sh;
142 /* PPC32 specification says we must generate an exception if
143 * rA is in the range of registers to be loaded.
144 * On the other hand, IBM says this is valid, but rA won't be loaded.
145 * For now, I'll follow the spec...
147 void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
149 if (likely(xer_bc != 0)) {
150 if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
151 (reg < rb && (reg + xer_bc) > rb))) {
152 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
153 POWERPC_EXCP_INVAL |
154 POWERPC_EXCP_INVAL_LSWX);
155 } else {
156 helper_lsw(addr, xer_bc, reg);
161 void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
163 int sh;
164 for (; nb > 3; nb -= 4, addr += 4) {
165 stl(get_addr(addr), env->gpr[reg]);
166 reg = (reg + 1) % 32;
168 if (unlikely(nb > 0)) {
169 for (sh = 24; nb > 0; nb--, addr++, sh -= 8)
170 stb(get_addr(addr), (env->gpr[reg] >> sh) & 0xFF);
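/* dcbz zeroes one whole (aligned) data cache line using ordinary stores;
 * a lwarx/ldarx reservation that falls inside the line is killed by those
 * stores, hence env->reserve is cleared when it matches the line address. */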
174 static void do_dcbz(target_ulong addr, int dcache_line_size)
176 target_long mask = get_addr(~(dcache_line_size - 1));
177 int i;
178 addr &= mask;
179 for (i = 0 ; i < dcache_line_size ; i += 4) {
180 stl(addr + i , 0);
182 if ((env->reserve & mask) == addr)
183 env->reserve = (target_ulong)-1ULL;
186 void helper_dcbz(target_ulong addr)
188 do_dcbz(addr, env->dcache_line_size);
191 void helper_dcbz_970(target_ulong addr)
193 if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
194 do_dcbz(addr, 32);
195 else
196 do_dcbz(addr, env->dcache_line_size);
199 void helper_icbi(target_ulong addr)
201 uint32_t tmp;
203 addr = get_addr(addr & ~(env->dcache_line_size - 1));
204 /* Invalidate one cache line :
205 * PowerPC specification says this is to be treated like a load
206 * (not a fetch) by the MMU. To be sure it will be so,
207 * do the load "by hand".
209 tmp = ldl(addr);
210 tb_invalidate_page_range(addr, addr + env->icache_line_size);
213 // XXX: to be tested
214 target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
216 int i, c, d;
217 d = 24;
218 for (i = 0; i < xer_bc; i++) {
219 c = ldub((uint32_t)addr++);
220 /* ra (if not 0) and rb are never modified */
221 if (likely(reg != rb && (ra == 0 || reg != ra))) {
222 env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
224 if (unlikely(c == xer_cmp))
225 break;
226 if (likely(d != 0)) {
227 d -= 8;
228 } else {
229 d = 24;
230 reg++;
231 reg = reg & 0x1F;
234 return i;
237 /*****************************************************************************/
238 /* Fixed point operations helpers */
239 #if defined(TARGET_PPC64)
241 /* multiply high word */
242 uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
244 uint64_t tl, th;
246 muls64(&tl, &th, arg1, arg2);
247 return th;
250 /* multiply high word unsigned */
251 uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
253 uint64_t tl, th;
255 mulu64(&tl, &th, arg1, arg2);
256 return th;
259 uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
261 int64_t th;
262 uint64_t tl;
264 muls64(&tl, (uint64_t *)&th, arg1, arg2);
265 /* If th != 0 && th != -1, then we had an overflow */
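/* The test below, (uint64_t)(th + 1) <= 1, is a branch-free check for
 * exactly that condition: th == 0 || th == -1. */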
266 if (likely((uint64_t)(th + 1) <= 1)) {
267 env->xer &= ~(1 << XER_OV);
268 } else {
269 env->xer |= (1 << XER_OV) | (1 << XER_SO);
271 return (int64_t)tl;
273 #endif
275 target_ulong helper_cntlzw (target_ulong t)
277 return clz32(t);
280 #if defined(TARGET_PPC64)
281 target_ulong helper_cntlzd (target_ulong t)
283 return clz64(t);
285 #endif
287 /* shift right arithmetic helper */
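/* XER.CA is set only when the source is negative and at least one 1 bit
 * is shifted out; that carry is what a following addze needs to turn
 * srawi into a signed division by a power of two. */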
288 target_ulong helper_sraw (target_ulong value, target_ulong shift)
290 int32_t ret;
292 if (likely(!(shift & 0x20))) {
293 if (likely((uint32_t)shift != 0)) {
294 shift &= 0x1f;
295 ret = (int32_t)value >> shift;
296 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
297 env->xer &= ~(1 << XER_CA);
298 } else {
299 env->xer |= (1 << XER_CA);
301 } else {
302 ret = (int32_t)value;
303 env->xer &= ~(1 << XER_CA);
305 } else {
306 ret = (int32_t)value >> 31;
307 if (ret) {
308 env->xer |= (1 << XER_CA);
309 } else {
310 env->xer &= ~(1 << XER_CA);
313 return (target_long)ret;
316 #if defined(TARGET_PPC64)
317 target_ulong helper_srad (target_ulong value, target_ulong shift)
319 int64_t ret;
321 if (likely(!(shift & 0x40))) {
322 if (likely((uint64_t)shift != 0)) {
323 shift &= 0x3f;
324 ret = (int64_t)value >> shift;
325 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
326 env->xer &= ~(1 << XER_CA);
327 } else {
328 env->xer |= (1 << XER_CA);
330 } else {
331 ret = (int64_t)value;
332 env->xer &= ~(1 << XER_CA);
334 } else {
335 ret = (int64_t)value >> 63;
336 if (ret) {
337 env->xer |= (1 << XER_CA);
338 } else {
339 env->xer &= ~(1 << XER_CA);
342 return ret;
344 #endif
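/* popcntb is a per-byte population count: the masked add steps sum
 * adjacent bit pairs, then nibbles, then bytes, so each byte of the
 * result holds the number of 1 bits (0..8) of the corresponding source
 * byte; no horizontal sum across bytes is performed. */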
346 target_ulong helper_popcntb (target_ulong val)
348 val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
349 val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
350 val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
351 return val;
354 #if defined(TARGET_PPC64)
355 target_ulong helper_popcntb_64 (target_ulong val)
357 val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
358 val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
359 val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
360 return val;
362 #endif
364 /*****************************************************************************/
365 /* Floating point operations helpers */
366 uint64_t helper_float32_to_float64(uint32_t arg)
368 CPU_FloatU f;
369 CPU_DoubleU d;
370 f.l = arg;
371 d.d = float32_to_float64(f.f, &env->fp_status);
372 return d.ll;
375 uint32_t helper_float64_to_float32(uint64_t arg)
377 CPU_FloatU f;
378 CPU_DoubleU d;
379 d.ll = arg;
380 f.f = float64_to_float32(d.d, &env->fp_status);
381 return f.l;
384 static always_inline int fpisneg (float64 d)
386 CPU_DoubleU u;
388 u.d = d;
390 return u.ll >> 63 != 0;
393 static always_inline int isden (float64 d)
395 CPU_DoubleU u;
397 u.d = d;
399 return ((u.ll >> 52) & 0x7FF) == 0;
402 static always_inline int iszero (float64 d)
404 CPU_DoubleU u;
406 u.d = d;
408 return (u.ll & ~0x8000000000000000ULL) == 0;
411 static always_inline int isinfinity (float64 d)
413 CPU_DoubleU u;
415 u.d = d;
417 return ((u.ll >> 52) & 0x7FF) == 0x7FF &&
418 (u.ll & 0x000FFFFFFFFFFFFFULL) == 0;
421 #ifdef CONFIG_SOFTFLOAT
422 static always_inline int isfinite (float64 d)
424 CPU_DoubleU u;
426 u.d = d;
428 return (((u.ll >> 52) & 0x7FF) != 0x7FF);
431 static always_inline int isnormal (float64 d)
433 CPU_DoubleU u;
435 u.d = d;
437 uint32_t exp = (u.ll >> 52) & 0x7FF;
438 return ((0 < exp) && (exp < 0x7FF));
440 #endif
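/* Compute the FPSCR FPRF field (C | FPCC) for a float64 operand.
 * Values produced below: 0x11 quiet NaN, 0x09 -infinity, 0x05 +infinity,
 * 0x12 -zero, 0x02 +zero, 0x18 -denormal, 0x14 +denormal, 0x08 -normal,
 * 0x04 +normal (a signaling NaN leaves the flags at 0x00). */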
442 uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
444 CPU_DoubleU farg;
445 int isneg;
446 int ret;
447 farg.ll = arg;
448 isneg = fpisneg(farg.d);
449 if (unlikely(float64_is_nan(farg.d))) {
450 if (float64_is_signaling_nan(farg.d)) {
451 /* Signaling NaN: flags are undefined */
452 ret = 0x00;
453 } else {
454 /* Quiet NaN */
455 ret = 0x11;
457 } else if (unlikely(isinfinity(farg.d))) {
458 /* +/- infinity */
459 if (isneg)
460 ret = 0x09;
461 else
462 ret = 0x05;
463 } else {
464 if (iszero(farg.d)) {
465 /* +/- zero */
466 if (isneg)
467 ret = 0x12;
468 else
469 ret = 0x02;
470 } else {
471 if (isden(farg.d)) {
472 /* Denormalized numbers */
473 ret = 0x10;
474 } else {
475 /* Normalized numbers */
476 ret = 0x00;
478 if (isneg) {
479 ret |= 0x08;
480 } else {
481 ret |= 0x04;
485 if (set_fprf) {
486 /* We update FPSCR_FPRF */
487 env->fpscr &= ~(0x1F << FPSCR_FPRF);
488 env->fpscr |= ret << FPSCR_FPRF;
490 /* We just need fpcc to update Rc1 */
491 return ret & 0xF;
494 /* Floating-point invalid operations exception */
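/* For the arithmetic cases below, a disabled exception (VE=0) makes the
 * helper return an all-ones quiet-NaN pattern and set FPCC to "unordered";
 * with VE=1 a program exception is raised when MSR[FE0] or MSR[FE1] is
 * set, except for VXVC where it is deferred so the target FPR can be
 * updated first. */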
495 static always_inline uint64_t fload_invalid_op_excp (int op)
497 uint64_t ret = 0;
498 int ve;
500 ve = fpscr_ve;
501 if (op & POWERPC_EXCP_FP_VXSNAN) {
502 /* Operation on signaling NaN */
503 env->fpscr |= 1 << FPSCR_VXSNAN;
505 if (op & POWERPC_EXCP_FP_VXSOFT) {
506 /* Software-defined condition */
507 env->fpscr |= 1 << FPSCR_VXSOFT;
509 switch (op & ~(POWERPC_EXCP_FP_VXSOFT | POWERPC_EXCP_FP_VXSNAN)) {
510 case POWERPC_EXCP_FP_VXISI:
511 /* Magnitude subtraction of infinities */
512 env->fpscr |= 1 << FPSCR_VXISI;
513 goto update_arith;
514 case POWERPC_EXCP_FP_VXIDI:
515 /* Division of infinity by infinity */
516 env->fpscr |= 1 << FPSCR_VXIDI;
517 goto update_arith;
518 case POWERPC_EXCP_FP_VXZDZ:
519 /* Division of zero by zero */
520 env->fpscr |= 1 << FPSCR_VXZDZ;
521 goto update_arith;
522 case POWERPC_EXCP_FP_VXIMZ:
523 /* Multiplication of zero by infinity */
524 env->fpscr |= 1 << FPSCR_VXIMZ;
525 goto update_arith;
526 case POWERPC_EXCP_FP_VXVC:
527 /* Ordered comparison of NaN */
528 env->fpscr |= 1 << FPSCR_VXVC;
529 env->fpscr &= ~(0xF << FPSCR_FPCC);
530 env->fpscr |= 0x11 << FPSCR_FPCC;
531 /* We must update the target FPR before raising the exception */
532 if (ve != 0) {
533 env->exception_index = POWERPC_EXCP_PROGRAM;
534 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
535 /* Update the floating-point enabled exception summary */
536 env->fpscr |= 1 << FPSCR_FEX;
537 /* Exception is deferred */
538 ve = 0;
540 break;
541 case POWERPC_EXCP_FP_VXSQRT:
542 /* Square root of a negative number */
543 env->fpscr |= 1 << FPSCR_VXSQRT;
544 update_arith:
545 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
546 if (ve == 0) {
547 /* Set the result to quiet NaN */
548 ret = UINT64_MAX;
549 env->fpscr &= ~(0xF << FPSCR_FPCC);
550 env->fpscr |= 0x11 << FPSCR_FPCC;
552 break;
553 case POWERPC_EXCP_FP_VXCVI:
554 /* Invalid conversion */
555 env->fpscr |= 1 << FPSCR_VXCVI;
556 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
557 if (ve == 0) {
558 /* Set the result to quiet NaN */
559 ret = UINT64_MAX;
560 env->fpscr &= ~(0xF << FPSCR_FPCC);
561 env->fpscr |= 0x11 << FPSCR_FPCC;
563 break;
565 /* Update the floating-point invalid operation summary */
566 env->fpscr |= 1 << FPSCR_VX;
567 /* Update the floating-point exception summary */
568 env->fpscr |= 1 << FPSCR_FX;
569 if (ve != 0) {
570 /* Update the floating-point enabled exception summary */
571 env->fpscr |= 1 << FPSCR_FEX;
572 if (msr_fe0 != 0 || msr_fe1 != 0)
573 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
575 return ret;
578 static always_inline uint64_t float_zero_divide_excp (uint64_t arg1, uint64_t arg2)
580 env->fpscr |= 1 << FPSCR_ZX;
581 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
582 /* Update the floating-point exception summary */
583 env->fpscr |= 1 << FPSCR_FX;
584 if (fpscr_ze != 0) {
585 /* Update the floating-point enabled exception summary */
586 env->fpscr |= 1 << FPSCR_FEX;
587 if (msr_fe0 != 0 || msr_fe1 != 0) {
588 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
589 POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
591 } else {
592 /* Set the result to infinity */
593 arg1 = ((arg1 ^ arg2) & 0x8000000000000000ULL);
594 arg1 |= 0x7FFULL << 52;
596 return arg1;
599 static always_inline void float_overflow_excp (void)
601 env->fpscr |= 1 << FPSCR_OX;
602 /* Update the floating-point exception summary */
603 env->fpscr |= 1 << FPSCR_FX;
604 if (fpscr_oe != 0) {
605 /* XXX: should adjust the result */
606 /* Update the floating-point enabled exception summary */
607 env->fpscr |= 1 << FPSCR_FEX;
608 /* We must update the target FPR before raising the exception */
609 env->exception_index = POWERPC_EXCP_PROGRAM;
610 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
611 } else {
612 env->fpscr |= 1 << FPSCR_XX;
613 env->fpscr |= 1 << FPSCR_FI;
617 static always_inline void float_underflow_excp (void)
619 env->fpscr |= 1 << FPSCR_UX;
620 /* Update the floating-point exception summary */
621 env->fpscr |= 1 << FPSCR_FX;
622 if (fpscr_ue != 0) {
623 /* XXX: should adjust the result */
624 /* Update the floating-point enabled exception summary */
625 env->fpscr |= 1 << FPSCR_FEX;
626 /* We must update the target FPR before raising the exception */
627 env->exception_index = POWERPC_EXCP_PROGRAM;
628 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
632 static always_inline void float_inexact_excp (void)
634 env->fpscr |= 1 << FPSCR_XX;
635 /* Update the floating-point exception summary */
636 env->fpscr |= 1 << FPSCR_FX;
637 if (fpscr_xe != 0) {
638 /* Update the floating-point enabled exception summary */
639 env->fpscr |= 1 << FPSCR_FEX;
640 /* We must update the target FPR before raising the exception */
641 env->exception_index = POWERPC_EXCP_PROGRAM;
642 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
646 static always_inline void fpscr_set_rounding_mode (void)
648 int rnd_type;
650 /* Set rounding mode */
651 switch (fpscr_rn) {
652 case 0:
653 /* Best approximation (round to nearest) */
654 rnd_type = float_round_nearest_even;
655 break;
656 case 1:
657 /* Smaller magnitude (round toward zero) */
658 rnd_type = float_round_to_zero;
659 break;
660 case 2:
661 /* Round toward +infinity */
662 rnd_type = float_round_up;
663 break;
664 default:
665 case 3:
666 /* Round toward -infinity */
667 rnd_type = float_round_down;
668 break;
670 set_float_rounding_mode(rnd_type, &env->fp_status);
673 void helper_fpscr_setbit (uint32_t bit)
675 int prev;
677 prev = (env->fpscr >> bit) & 1;
678 env->fpscr |= 1 << bit;
679 if (prev == 0) {
680 switch (bit) {
681 case FPSCR_VX:
682 env->fpscr |= 1 << FPSCR_FX;
683 if (fpscr_ve)
684 goto raise_ve;
685 case FPSCR_OX:
686 env->fpscr |= 1 << FPSCR_FX;
687 if (fpscr_oe)
688 goto raise_oe;
689 break;
690 case FPSCR_UX:
691 env->fpscr |= 1 << FPSCR_FX;
692 if (fpscr_ue)
693 goto raise_ue;
694 break;
695 case FPSCR_ZX:
696 env->fpscr |= 1 << FPSCR_FX;
697 if (fpscr_ze)
698 goto raise_ze;
699 break;
700 case FPSCR_XX:
701 env->fpscr |= 1 << FPSCR_FX;
702 if (fpscr_xe)
703 goto raise_xe;
704 break;
705 case FPSCR_VXSNAN:
706 case FPSCR_VXISI:
707 case FPSCR_VXIDI:
708 case FPSCR_VXZDZ:
709 case FPSCR_VXIMZ:
710 case FPSCR_VXVC:
711 case FPSCR_VXSOFT:
712 case FPSCR_VXSQRT:
713 case FPSCR_VXCVI:
714 env->fpscr |= 1 << FPSCR_VX;
715 env->fpscr |= 1 << FPSCR_FX;
716 if (fpscr_ve != 0)
717 goto raise_ve;
718 break;
719 case FPSCR_VE:
720 if (fpscr_vx != 0) {
721 raise_ve:
722 env->error_code = POWERPC_EXCP_FP;
723 if (fpscr_vxsnan)
724 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
725 if (fpscr_vxisi)
726 env->error_code |= POWERPC_EXCP_FP_VXISI;
727 if (fpscr_vxidi)
728 env->error_code |= POWERPC_EXCP_FP_VXIDI;
729 if (fpscr_vxzdz)
730 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
731 if (fpscr_vximz)
732 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
733 if (fpscr_vxvc)
734 env->error_code |= POWERPC_EXCP_FP_VXVC;
735 if (fpscr_vxsoft)
736 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
737 if (fpscr_vxsqrt)
738 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
739 if (fpscr_vxcvi)
740 env->error_code |= POWERPC_EXCP_FP_VXCVI;
741 goto raise_excp;
743 break;
744 case FPSCR_OE:
745 if (fpscr_ox != 0) {
746 raise_oe:
747 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
748 goto raise_excp;
750 break;
751 case FPSCR_UE:
752 if (fpscr_ux != 0) {
753 raise_ue:
754 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
755 goto raise_excp;
757 break;
758 case FPSCR_ZE:
759 if (fpscr_zx != 0) {
760 raise_ze:
761 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
762 goto raise_excp;
764 break;
765 case FPSCR_XE:
766 if (fpscr_xx != 0) {
767 raise_xe:
768 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
769 goto raise_excp;
771 break;
772 case FPSCR_RN1:
773 case FPSCR_RN:
774 fpscr_set_rounding_mode();
775 break;
776 default:
777 break;
778 raise_excp:
779 /* Update the floating-point enabled exception summary */
780 env->fpscr |= 1 << FPSCR_FEX;
781 /* We have to update Rc1 before raising the exception */
782 env->exception_index = POWERPC_EXCP_PROGRAM;
783 break;
788 void helper_store_fpscr (uint64_t arg, uint32_t mask)
791 * We use only the 32 LSB of the incoming fpr
793 uint32_t prev, new;
794 int i;
796 prev = env->fpscr;
797 new = (uint32_t)arg;
798 new &= ~0x90000000;
799 new |= prev & 0x90000000;
800 for (i = 0; i < 7; i++) {
801 if (mask & (1 << i)) {
802 env->fpscr &= ~(0xF << (4 * i));
803 env->fpscr |= new & (0xF << (4 * i));
806 /* Update VX and FEX */
807 if (fpscr_ix != 0)
808 env->fpscr |= 1 << FPSCR_VX;
809 else
810 env->fpscr &= ~(1 << FPSCR_VX);
811 if ((fpscr_ex & fpscr_eex) != 0) {
812 env->fpscr |= 1 << FPSCR_FEX;
813 env->exception_index = POWERPC_EXCP_PROGRAM;
814 /* XXX: we should compute it properly */
815 env->error_code = POWERPC_EXCP_FP;
817 else
818 env->fpscr &= ~(1 << FPSCR_FEX);
819 fpscr_set_rounding_mode();
822 void helper_float_check_status (void)
824 #ifdef CONFIG_SOFTFLOAT
825 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
826 (env->error_code & POWERPC_EXCP_FP)) {
827 /* Deferred floating-point exception after target FPR update */
828 if (msr_fe0 != 0 || msr_fe1 != 0)
829 raise_exception_err(env, env->exception_index, env->error_code);
830 } else if (env->fp_status.float_exception_flags & float_flag_overflow) {
831 float_overflow_excp();
832 } else if (env->fp_status.float_exception_flags & float_flag_underflow) {
833 float_underflow_excp();
834 } else if (env->fp_status.float_exception_flags & float_flag_inexact) {
835 float_inexact_excp();
837 #else
838 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
839 (env->error_code & POWERPC_EXCP_FP)) {
840 /* Deferred floating-point exception after target FPR update */
841 if (msr_fe0 != 0 || msr_fe1 != 0)
842 raise_exception_err(env, env->exception_index, env->error_code);
844 RETURN();
845 #endif
848 #ifdef CONFIG_SOFTFLOAT
849 void helper_reset_fpstatus (void)
851 env->fp_status.float_exception_flags = 0;
853 #endif
855 /* fadd - fadd. */
856 uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
858 CPU_DoubleU farg1, farg2;
860 farg1.ll = arg1;
861 farg2.ll = arg2;
862 #if USE_PRECISE_EMULATION
863 if (unlikely(float64_is_signaling_nan(farg1.d) ||
864 float64_is_signaling_nan(farg2.d))) {
865 /* sNaN addition */
866 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
867 } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
868 fpisneg(farg1.d) == fpisneg(farg2.d))) {
869 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
870 } else {
871 /* Magnitude subtraction of infinities */
872 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
874 #else
875 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
876 #endif
877 return farg1.ll;
880 /* fsub - fsub. */
881 uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
883 CPU_DoubleU farg1, farg2;
885 farg1.ll = arg1;
886 farg2.ll = arg2;
887 #if USE_PRECISE_EMULATION
889 if (unlikely(float64_is_signaling_nan(farg1.d) ||
890 float64_is_signaling_nan(farg2.d))) {
891 /* sNaN subtraction */
892 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
893 } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
894 fpisneg(farg1.d) != fpisneg(farg2.d))) {
895 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
896 } else {
897 /* Magnitude subtraction of infinities */
898 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
901 #else
902 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
903 #endif
904 return farg1.ll;
907 /* fmul - fmul. */
908 uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
910 CPU_DoubleU farg1, farg2;
912 farg1.ll = arg1;
913 farg2.ll = arg2;
914 #if USE_PRECISE_EMULATION
915 if (unlikely(float64_is_signaling_nan(farg1.d) ||
916 float64_is_signaling_nan(farg2.d))) {
917 /* sNaN multiplication */
918 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
919 } else if (unlikely((isinfinity(farg1.d) && iszero(farg2.d)) ||
920 (iszero(farg1.d) && isinfinity(farg2.d)))) {
921 /* Multiplication of zero by infinity */
922 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
923 } else {
924 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
927 #else
928 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
929 #endif
930 return farg1.ll;
933 /* fdiv - fdiv. */
934 uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
936 CPU_DoubleU farg1, farg2;
938 farg1.ll = arg1;
939 farg2.ll = arg2;
940 #if USE_PRECISE_EMULATION
941 if (unlikely(float64_is_signaling_nan(farg1.d) ||
942 float64_is_signaling_nan(farg2.d))) {
943 /* sNaN division */
944 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
945 } else if (unlikely(isinfinity(farg1.d) && isinfinity(farg2.d))) {
946 /* Division of infinity by infinity */
947 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
948 } else if (unlikely(iszero(farg2.d))) {
949 if (iszero(farg1.d)) {
950 /* Division of zero by zero */
951 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
952 } else {
953 /* Division by zero */
954 farg1.ll = float_zero_divide_excp(farg1.d, farg2.d);
956 } else {
957 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
959 #else
960 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
961 #endif
962 return farg1.ll;
965 /* fabs */
966 uint64_t helper_fabs (uint64_t arg)
968 CPU_DoubleU farg;
970 farg.ll = arg;
971 farg.d = float64_abs(farg.d);
972 return farg.ll;
975 /* fnabs */
976 uint64_t helper_fnabs (uint64_t arg)
978 CPU_DoubleU farg;
980 farg.ll = arg;
981 farg.d = float64_abs(farg.d);
982 farg.d = float64_chs(farg.d);
983 return farg.ll;
986 /* fneg */
987 uint64_t helper_fneg (uint64_t arg)
989 CPU_DoubleU farg;
991 farg.ll = arg;
992 farg.d = float64_chs(farg.d);
993 return farg.ll;
996 /* fctiw - fctiw. */
997 uint64_t helper_fctiw (uint64_t arg)
999 CPU_DoubleU farg;
1000 farg.ll = arg;
1002 if (unlikely(float64_is_signaling_nan(farg.d))) {
1003 /* sNaN conversion */
1004 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1005 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1006 /* qNan / infinity conversion */
1007 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1008 } else {
1009 farg.ll = float64_to_int32(farg.d, &env->fp_status);
1010 #if USE_PRECISE_EMULATION
1011 /* XXX: higher bits are not supposed to be significant.
1012 * to make tests easier, return the same as a real PowerPC 750
1014 farg.ll |= 0xFFF80000ULL << 32;
1015 #endif
1017 return farg.ll;
1020 /* fctiwz - fctiwz. */
1021 uint64_t helper_fctiwz (uint64_t arg)
1023 CPU_DoubleU farg;
1024 farg.ll = arg;
1026 if (unlikely(float64_is_signaling_nan(farg.d))) {
1027 /* sNaN conversion */
1028 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1029 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1030 /* qNan / infinity conversion */
1031 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1032 } else {
1033 farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1034 #if USE_PRECISE_EMULATION
1035 /* XXX: higher bits are not supposed to be significant.
1036 * to make tests easier, return the same as a real PowerPC 750
1038 farg.ll |= 0xFFF80000ULL << 32;
1039 #endif
1041 return farg.ll;
1044 #if defined(TARGET_PPC64)
1045 /* fcfid - fcfid. */
1046 uint64_t helper_fcfid (uint64_t arg)
1048 CPU_DoubleU farg;
1049 farg.d = int64_to_float64(arg, &env->fp_status);
1050 return farg.ll;
1053 /* fctid - fctid. */
1054 uint64_t helper_fctid (uint64_t arg)
1056 CPU_DoubleU farg;
1057 farg.ll = arg;
1059 if (unlikely(float64_is_signaling_nan(farg.d))) {
1060 /* sNaN conversion */
1061 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1062 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1063 /* qNan / infinity conversion */
1064 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1065 } else {
1066 farg.ll = float64_to_int64(farg.d, &env->fp_status);
1068 return farg.ll;
1071 /* fctidz - fctidz. */
1072 uint64_t helper_fctidz (uint64_t arg)
1074 CPU_DoubleU farg;
1075 farg.ll = arg;
1077 if (unlikely(float64_is_signaling_nan(farg.d))) {
1078 /* sNaN conversion */
1079 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1080 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1081 /* qNan / infinity conversion */
1082 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1083 } else {
1084 farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
1086 return farg.ll;
1089 #endif
1091 static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
1093 CPU_DoubleU farg;
1094 farg.ll = arg;
1096 if (unlikely(float64_is_signaling_nan(farg.d))) {
1097 /* sNaN round */
1098 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1099 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1100 /* qNan / infinity round */
1101 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1102 } else {
1103 set_float_rounding_mode(rounding_mode, &env->fp_status);
1104 farg.ll = float64_round_to_int(farg.d, &env->fp_status);
1105 /* Restore rounding mode from FPSCR */
1106 fpscr_set_rounding_mode();
1108 return farg.ll;
1111 uint64_t helper_frin (uint64_t arg)
1113 return do_fri(arg, float_round_nearest_even);
1116 uint64_t helper_friz (uint64_t arg)
1118 return do_fri(arg, float_round_to_zero);
1121 uint64_t helper_frip (uint64_t arg)
1123 return do_fri(arg, float_round_up);
1126 uint64_t helper_frim (uint64_t arg)
1128 return do_fri(arg, float_round_down);
1131 /* fmadd - fmadd. */
1132 uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1134 CPU_DoubleU farg1, farg2, farg3;
1136 farg1.ll = arg1;
1137 farg2.ll = arg2;
1138 farg3.ll = arg3;
1139 #if USE_PRECISE_EMULATION
1140 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1141 float64_is_signaling_nan(farg2.d) ||
1142 float64_is_signaling_nan(farg3.d))) {
1143 /* sNaN operation */
1144 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1145 } else {
1146 #ifdef FLOAT128
1147 /* This is the way the PowerPC specification defines it */
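/* fmadd rounds only once, after the full multiply-add; the float128
 * intermediate keeps the 106-bit product exact before the add, unlike
 * the two-operation fallbacks below, which may round twice. */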
1148 float128 ft0_128, ft1_128;
1150 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1151 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1152 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1153 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1154 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1155 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1156 #else
1157 /* This is OK on x86 hosts */
1158 farg1.d = (farg1.d * farg2.d) + farg3.d;
1159 #endif
1161 #else
1162 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1163 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1164 #endif
1165 return farg1.ll;
1168 /* fmsub - fmsub. */
1169 uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1171 CPU_DoubleU farg1, farg2, farg3;
1173 farg1.ll = arg1;
1174 farg2.ll = arg2;
1175 farg3.ll = arg3;
1176 #if USE_PRECISE_EMULATION
1177 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1178 float64_is_signaling_nan(farg2.d) ||
1179 float64_is_signaling_nan(farg3.d))) {
1180 /* sNaN operation */
1181 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1182 } else {
1183 #ifdef FLOAT128
1184 /* This is the way the PowerPC specification defines it */
1185 float128 ft0_128, ft1_128;
1187 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1188 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1189 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1190 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1191 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1192 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1193 #else
1194 /* This is OK on x86 hosts */
1195 farg1.d = (farg1.d * farg2.d) - farg3.d;
1196 #endif
1198 #else
1199 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1200 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1201 #endif
1202 return farg1.ll;
1205 /* fnmadd - fnmadd. */
1206 uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1208 CPU_DoubleU farg1, farg2, farg3;
1210 farg1.ll = arg1;
1211 farg2.ll = arg2;
1212 farg3.ll = arg3;
1214 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1215 float64_is_signaling_nan(farg2.d) ||
1216 float64_is_signaling_nan(farg3.d))) {
1217 /* sNaN operation */
1218 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1219 } else {
1220 #if USE_PRECISE_EMULATION
1221 #ifdef FLOAT128
1222 /* This is the way the PowerPC specification defines it */
1223 float128 ft0_128, ft1_128;
1225 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1226 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1227 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1228 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1229 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1230 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1231 #else
1232 /* This is OK on x86 hosts */
1233 farg1.d = (farg1.d * farg2.d) + farg3.d;
1234 #endif
1235 #else
1236 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1237 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1238 #endif
1239 if (likely(!isnan(farg1.d)))
1240 farg1.d = float64_chs(farg1.d);
1242 return farg1.ll;
1245 /* fnmsub - fnmsub. */
1246 uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1248 CPU_DoubleU farg1, farg2, farg3;
1250 farg1.ll = arg1;
1251 farg2.ll = arg2;
1252 farg3.ll = arg3;
1254 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1255 float64_is_signaling_nan(farg2.d) ||
1256 float64_is_signaling_nan(farg3.d))) {
1257 /* sNaN operation */
1258 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1259 } else {
1260 #if USE_PRECISE_EMULATION
1261 #ifdef FLOAT128
1262 /* This is the way the PowerPC specification defines it */
1263 float128 ft0_128, ft1_128;
1265 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1266 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1267 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1268 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1269 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1270 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1271 #else
1272 /* This is OK on x86 hosts */
1273 farg1.d = (farg1.d * farg2.d) - farg3.d;
1274 #endif
1275 #else
1276 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1277 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1278 #endif
1279 if (likely(!isnan(farg1.d)))
1280 farg1.d = float64_chs(farg1.d);
1282 return farg1.ll;
1285 /* frsp - frsp. */
1286 uint64_t helper_frsp (uint64_t arg)
1288 CPU_DoubleU farg;
1289 farg.ll = arg;
1291 #if USE_PRECISE_EMULATION
1292 if (unlikely(float64_is_signaling_nan(farg.d))) {
1293 /* sNaN round to single-precision */
1294 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1295 } else {
1296 farg.d = float64_to_float32(farg.d, &env->fp_status);
1298 #else
1299 farg.d = float64_to_float32(farg.d, &env->fp_status);
1300 #endif
1301 return farg.ll;
1304 /* fsqrt - fsqrt. */
1305 uint64_t helper_fsqrt (uint64_t arg)
1307 CPU_DoubleU farg;
1308 farg.ll = arg;
1310 if (unlikely(float64_is_signaling_nan(farg.d))) {
1311 /* sNaN square root */
1312 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1313 } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
1314 /* Square root of a negative nonzero number */
1315 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1316 } else {
1317 farg.d = float64_sqrt(farg.d, &env->fp_status);
1319 return farg.ll;
1322 /* fre - fre. */
1323 uint64_t helper_fre (uint64_t arg)
1325 CPU_DoubleU farg;
1326 farg.ll = arg;
1328 if (unlikely(float64_is_signaling_nan(farg.d))) {
1329 /* sNaN reciprocal */
1330 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1331 } else if (unlikely(iszero(farg.d))) {
1332 /* Zero reciprocal */
1333 farg.ll = float_zero_divide_excp(1.0, farg.d);
1334 } else if (likely(isnormal(farg.d))) {
1335 farg.d = float64_div(1.0, farg.d, &env->fp_status);
1336 } else {
1337 if (farg.ll == 0x8000000000000000ULL) {
1338 farg.ll = 0xFFF0000000000000ULL;
1339 } else if (farg.ll == 0x0000000000000000ULL) {
1340 farg.ll = 0x7FF0000000000000ULL;
1341 } else if (isnan(farg.d)) {
1342 farg.ll = 0x7FF8000000000000ULL;
1343 } else if (fpisneg(farg.d)) {
1344 farg.ll = 0x8000000000000000ULL;
1345 } else {
1346 farg.ll = 0x0000000000000000ULL;
1349 return farg.ll;
1352 /* fres - fres. */
1353 uint64_t helper_fres (uint64_t arg)
1355 CPU_DoubleU farg;
1356 farg.ll = arg;
1358 if (unlikely(float64_is_signaling_nan(farg.d))) {
1359 /* sNaN reciprocal */
1360 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1361 } else if (unlikely(iszero(farg.d))) {
1362 /* Zero reciprocal */
1363 farg.ll = float_zero_divide_excp(1.0, farg.d);
1364 } else if (likely(isnormal(farg.d))) {
1365 #if USE_PRECISE_EMULATION
1366 farg.d = float64_div(1.0, farg.d, &env->fp_status);
1367 farg.d = float64_to_float32(farg.d, &env->fp_status);
1368 #else
1369 farg.d = float32_div(1.0, farg.d, &env->fp_status);
1370 #endif
1371 } else {
1372 if (farg.ll == 0x8000000000000000ULL) {
1373 farg.ll = 0xFFF0000000000000ULL;
1374 } else if (farg.ll == 0x0000000000000000ULL) {
1375 farg.ll = 0x7FF0000000000000ULL;
1376 } else if (isnan(farg.d)) {
1377 farg.ll = 0x7FF8000000000000ULL;
1378 } else if (fpisneg(farg.d)) {
1379 farg.ll = 0x8000000000000000ULL;
1380 } else {
1381 farg.ll = 0x0000000000000000ULL;
1384 return farg.ll;
1387 /* frsqrte - frsqrte. */
1388 uint64_t helper_frsqrte (uint64_t arg)
1390 CPU_DoubleU farg;
1391 farg.ll = arg;
1393 if (unlikely(float64_is_signaling_nan(farg.d))) {
1394 /* sNaN reciprocal square root */
1395 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1396 } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
1397 /* Reciprocal square root of a negative nonzero number */
1398 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1399 } else if (likely(isnormal(farg.d))) {
1400 farg.d = float64_sqrt(farg.d, &env->fp_status);
1401 farg.d = float32_div(1.0, farg.d, &env->fp_status);
1402 } else {
1403 if (farg.ll == 0x8000000000000000ULL) {
1404 farg.ll = 0xFFF0000000000000ULL;
1405 } else if (farg.ll == 0x0000000000000000ULL) {
1406 farg.ll = 0x7FF0000000000000ULL;
1407 } else if (isnan(farg.d)) {
1408 farg.ll |= 0x000FFFFFFFFFFFFFULL;
1409 } else if (fpisneg(farg.d)) {
1410 farg.ll = 0x7FF8000000000000ULL;
1411 } else {
1412 farg.ll = 0x0000000000000000ULL;
1415 return farg.ll;
1418 /* fsel - fsel. */
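/* A non-negative or (+/-)zero arg1 selects arg2, a negative arg1 selects
 * arg3; only the sign bit and the zero pattern are examined, no IEEE
 * ordered comparison is performed. */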
1419 uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1421 CPU_DoubleU farg1, farg2, farg3;
1423 farg1.ll = arg1;
1424 farg2.ll = arg2;
1425 farg3.ll = arg3;
1427 if (!fpisneg(farg1.d) || iszero(farg1.d))
1428 return farg2.ll;
1429 else
1430 return farg3.ll;
1433 uint32_t helper_fcmpu (uint64_t arg1, uint64_t arg2)
1435 CPU_DoubleU farg1, farg2;
1436 uint32_t ret = 0;
1437 farg1.ll = arg1;
1438 farg2.ll = arg2;
1440 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1441 float64_is_signaling_nan(farg2.d))) {
1442 /* sNaN comparison */
1443 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1444 } else {
1445 if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1446 ret = 0x08UL;
1447 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1448 ret = 0x04UL;
1449 } else {
1450 ret = 0x02UL;
1453 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1454 env->fpscr |= ret << FPSCR_FPRF;
1455 return ret;
1458 uint32_t helper_fcmpo (uint64_t arg1, uint64_t arg2)
1460 CPU_DoubleU farg1, farg2;
1461 uint32_t ret = 0;
1462 farg1.ll = arg1;
1463 farg2.ll = arg2;
1465 if (unlikely(float64_is_nan(farg1.d) ||
1466 float64_is_nan(farg2.d))) {
1467 if (float64_is_signaling_nan(farg1.d) ||
1468 float64_is_signaling_nan(farg2.d)) {
1469 /* sNaN comparison */
1470 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1471 POWERPC_EXCP_FP_VXVC);
1472 } else {
1473 /* qNaN comparison */
1474 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1476 } else {
1477 if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1478 ret = 0x08UL;
1479 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1480 ret = 0x04UL;
1481 } else {
1482 ret = 0x02UL;
1485 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1486 env->fpscr |= ret << FPSCR_FPRF;
1487 return ret;
1490 #if !defined (CONFIG_USER_ONLY)
1491 void cpu_dump_rfi (target_ulong RA, target_ulong msr);
1493 void do_store_msr (void)
1495 T0 = hreg_store_msr(env, T0, 0);
1496 if (T0 != 0) {
1497 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1498 raise_exception(env, T0);
1502 static always_inline void do_rfi (target_ulong nip, target_ulong msr,
1503 target_ulong msrm, int keep_msrh)
1505 #if defined(TARGET_PPC64)
1506 if (msr & (1ULL << MSR_SF)) {
1507 nip = (uint64_t)nip;
1508 msr &= (uint64_t)msrm;
1509 } else {
1510 nip = (uint32_t)nip;
1511 msr = (uint32_t)(msr & msrm);
1512 if (keep_msrh)
1513 msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1515 #else
1516 nip = (uint32_t)nip;
1517 msr &= (uint32_t)msrm;
1518 #endif
1519 /* XXX: beware: this is false if VLE is supported */
1520 env->nip = nip & ~((target_ulong)0x00000003);
1521 hreg_store_msr(env, msr, 1);
1522 #if defined (DEBUG_OP)
1523 cpu_dump_rfi(env->nip, env->msr);
1524 #endif
1525 /* No need to raise an exception here,
1526 * as rfi is always the last insn of a TB
1528 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1531 void helper_rfi (void)
1533 do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1534 ~((target_ulong)0xFFFF0000), 1);
1537 #if defined(TARGET_PPC64)
1538 void helper_rfid (void)
1540 do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1541 ~((target_ulong)0xFFFF0000), 0);
1544 void helper_hrfid (void)
1546 do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1547 ~((target_ulong)0xFFFF0000), 0);
1549 #endif
1550 #endif
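/* tw/twi trap conditions: flags is the TO field, where 0x10 traps on
 * signed less-than, 0x08 on signed greater-than, 0x04 on equal, 0x02 on
 * unsigned less-than and 0x01 on unsigned greater-than. */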
1552 void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
1554 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1555 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1556 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1557 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1558 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1559 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1563 #if defined(TARGET_PPC64)
1564 void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
1566 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1567 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1568 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1569 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1570 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
1571 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1573 #endif
1575 /*****************************************************************************/
1576 /* PowerPC 601 specific instructions (POWER bridge) */
1577 void do_POWER_abso (void)
1579 if ((int32_t)T0 == INT32_MIN) {
1580 T0 = INT32_MAX;
1581 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1582 } else if ((int32_t)T0 < 0) {
1583 T0 = -T0;
1584 env->xer &= ~(1 << XER_OV);
1585 } else {
1586 env->xer &= ~(1 << XER_OV);
1590 void do_POWER_clcs (void)
1592 switch (T0) {
1593 case 0x0CUL:
1594 /* Instruction cache line size */
1595 T0 = env->icache_line_size;
1596 break;
1597 case 0x0DUL:
1598 /* Data cache line size */
1599 T0 = env->dcache_line_size;
1600 break;
1601 case 0x0EUL:
1602 /* Minimum cache line size */
1603 T0 = env->icache_line_size < env->dcache_line_size ?
1604 env->icache_line_size : env->dcache_line_size;
1605 break;
1606 case 0x0FUL:
1607 /* Maximum cache line size */
1608 T0 = env->icache_line_size > env->dcache_line_size ?
1609 env->icache_line_size : env->dcache_line_size;
1610 break;
1611 default:
1612 /* Undefined */
1613 break;
1617 void do_POWER_div (void)
1619 uint64_t tmp;
1621 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1622 (int32_t)T1 == 0) {
1623 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1624 env->spr[SPR_MQ] = 0;
1625 } else {
1626 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1627 env->spr[SPR_MQ] = tmp % T1;
1628 T0 = tmp / (int32_t)T1;
1632 void do_POWER_divo (void)
1634 int64_t tmp;
1636 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1637 (int32_t)T1 == 0) {
1638 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1639 env->spr[SPR_MQ] = 0;
1640 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1641 } else {
1642 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1643 env->spr[SPR_MQ] = tmp % T1;
1644 tmp /= (int32_t)T1;
1645 if (tmp > (int64_t)INT32_MAX || tmp < (int64_t)INT32_MIN) {
1646 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1647 } else {
1648 env->xer &= ~(1 << XER_OV);
1650 T0 = tmp;
1654 void do_POWER_divs (void)
1656 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1657 (int32_t)T1 == 0) {
1658 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1659 env->spr[SPR_MQ] = 0;
1660 } else {
1661 env->spr[SPR_MQ] = T0 % T1;
1662 T0 = (int32_t)T0 / (int32_t)T1;
1666 void do_POWER_divso (void)
1668 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1669 (int32_t)T1 == 0) {
1670 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1671 env->spr[SPR_MQ] = 0;
1672 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1673 } else {
1674 T0 = (int32_t)T0 / (int32_t)T1;
1675 env->spr[SPR_MQ] = (int32_t)T0 % (int32_t)T1;
1676 env->xer &= ~(1 << XER_OV);
1680 void do_POWER_dozo (void)
1682 if ((int32_t)T1 > (int32_t)T0) {
1683 T2 = T0;
1684 T0 = T1 - T0;
1685 if (((uint32_t)(~T2) ^ (uint32_t)T1 ^ UINT32_MAX) &
1686 ((uint32_t)(~T2) ^ (uint32_t)T0) & (1UL << 31)) {
1687 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1688 } else {
1689 env->xer &= ~(1 << XER_OV);
1691 } else {
1692 T0 = 0;
1693 env->xer &= ~(1 << XER_OV);
1697 void do_POWER_maskg (void)
1699 uint32_t ret;
1701 if ((uint32_t)T0 == (uint32_t)(T1 + 1)) {
1702 ret = UINT32_MAX;
1703 } else {
1704 ret = (UINT32_MAX >> ((uint32_t)T0)) ^
1705 ((UINT32_MAX >> ((uint32_t)T1)) >> 1);
1706 if ((uint32_t)T0 > (uint32_t)T1)
1707 ret = ~ret;
1709 T0 = ret;
1712 void do_POWER_mulo (void)
1714 uint64_t tmp;
1716 tmp = (uint64_t)T0 * (uint64_t)T1;
1717 env->spr[SPR_MQ] = tmp >> 32;
1718 T0 = tmp;
1719 if (tmp >> 32 != ((uint64_t)T0 >> 16) * ((uint64_t)T1 >> 16)) {
1720 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1721 } else {
1722 env->xer &= ~(1 << XER_OV);
1726 #if !defined (CONFIG_USER_ONLY)
1727 void do_POWER_rac (void)
1729 mmu_ctx_t ctx;
1730 int nb_BATs;
1732 /* We don't have to generate many instances of this instruction,
1733 * as rac is supervisor only.
1735 /* XXX: FIX THIS: Pretend we have no BAT */
1736 nb_BATs = env->nb_BATs;
1737 env->nb_BATs = 0;
1738 if (get_physical_address(env, &ctx, T0, 0, ACCESS_INT) == 0)
1739 T0 = ctx.raddr;
1740 env->nb_BATs = nb_BATs;
1743 void helper_rfsvc (void)
1745 do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1748 void do_store_hid0_601 (void)
1750 uint32_t hid0;
1752 hid0 = env->spr[SPR_HID0];
1753 if ((T0 ^ hid0) & 0x00000008) {
1754 /* Change current endianness */
1755 env->hflags &= ~(1 << MSR_LE);
1756 env->hflags_nmsr &= ~(1 << MSR_LE);
1757 env->hflags_nmsr |= (1 << MSR_LE) & (((T0 >> 3) & 1) << MSR_LE);
1758 env->hflags |= env->hflags_nmsr;
1759 if (loglevel != 0) {
1760 fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
1761 __func__, T0 & 0x8 ? 'l' : 'b', env->hflags);
1764 env->spr[SPR_HID0] = T0;
1766 #endif
1768 /*****************************************************************************/
1769 /* 602 specific instructions */
1770 /* mfrom is the most crazy instruction ever seen, imho ! */
1771 /* Real implementation uses a ROM table. Do the same */
1772 #define USE_MFROM_ROM_TABLE
1773 target_ulong helper_602_mfrom (target_ulong arg)
1775 if (likely(arg < 602)) {
1776 #if defined(USE_MFROM_ROM_TABLE)
1777 #include "mfrom_table.c"
1778 return mfrom_ROM_table[arg];
1779 #else
1780 double d;
1781 /* Extremely decomposed:
1782 * return 256 * log10(10**(-arg / 256) + 1.0) + 0.5
1785 d = arg;
1786 d = float64_div(d, 256, &env->fp_status);
1787 d = float64_chs(d);
1788 d = exp10(d); // XXX: use float emulation function
1789 d = float64_add(d, 1.0, &env->fp_status);
1790 d = log10(d); // XXX: use float emulation function
1791 d = float64_mul(d, 256, &env->fp_status);
1792 d = float64_add(d, 0.5, &env->fp_status);
1793 return float64_round_to_int(d, &env->fp_status);
1794 #endif
1795 } else {
1796 return 0;
1800 /*****************************************************************************/
1801 /* Embedded PowerPC specific helpers */
1803 /* XXX: to be improved to check access rights when in user-mode */
1804 void do_load_dcr (void)
1806 target_ulong val;
1808 if (unlikely(env->dcr_env == NULL)) {
1809 if (loglevel != 0) {
1810 fprintf(logfile, "No DCR environment\n");
1812 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1813 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1814 } else if (unlikely(ppc_dcr_read(env->dcr_env, T0, &val) != 0)) {
1815 if (loglevel != 0) {
1816 fprintf(logfile, "DCR read error %d %03x\n", (int)T0, (int)T0);
1818 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1819 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1820 } else {
1821 T0 = val;
1825 void do_store_dcr (void)
1827 if (unlikely(env->dcr_env == NULL)) {
1828 if (loglevel != 0) {
1829 fprintf(logfile, "No DCR environment\n");
1831 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1832 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1833 } else if (unlikely(ppc_dcr_write(env->dcr_env, T0, T1) != 0)) {
1834 if (loglevel != 0) {
1835 fprintf(logfile, "DCR write error %d %03x\n", (int)T0, (int)T0);
1837 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1838 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1842 #if !defined(CONFIG_USER_ONLY)
1843 void helper_40x_rfci (void)
1845 do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1846 ~((target_ulong)0xFFFF0000), 0);
1849 void helper_rfci (void)
1851 do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
1852 ~((target_ulong)0x3FFF0000), 0);
1855 void helper_rfdi (void)
1857 do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
1858 ~((target_ulong)0x3FFF0000), 0);
1861 void helper_rfmci (void)
1863 do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
1864 ~((target_ulong)0x3FFF0000), 0);
1867 void do_load_403_pb (int num)
1869 T0 = env->pb[num];
1872 void do_store_403_pb (int num)
1874 if (likely(env->pb[num] != T0)) {
1875 env->pb[num] = T0;
1876 /* Should be optimized */
1877 tlb_flush(env, 1);
1880 #endif
1882 /* 440 specific */
1883 target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
1885 target_ulong mask;
1886 int i;
1888 i = 1;
1889 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1890 if ((high & mask) == 0) {
1891 if (update_Rc) {
1892 env->crf[0] = 0x4;
1894 goto done;
1896 i++;
1898 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1899 if ((low & mask) == 0) {
1900 if (update_Rc) {
1901 env->crf[0] = 0x8;
1903 goto done;
1905 i++;
1907 if (update_Rc) {
1908 env->crf[0] = 0x2;
1910 done:
1911 env->xer = (env->xer & ~0x7F) | i;
1912 if (update_Rc) {
1913 env->crf[0] |= xer_so;
1915 return i;
1918 /*****************************************************************************/
1919 /* SPE extension helpers */
1920 /* Use a table to make this quicker */
1921 static uint8_t hbrev[16] = {
1922 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
1923 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
1926 static always_inline uint8_t byte_reverse (uint8_t val)
1928 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
1931 static always_inline uint32_t word_reverse (uint32_t val)
1933 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
1934 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
1937 #define MASKBITS 16 // Random value - to be fixed (implementation dependent)
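/* brinc implements the bit-reversed increment used for FFT-style
 * addressing: the bits of arg1 selected by arg2 are incremented in
 * bit-reversed order (or-ing in ~b lets the carry skip bits outside the
 * mask), while the bits of arg1 above MASKBITS pass through unchanged. */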
1938 target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
1940 uint32_t a, b, d, mask;
1942 mask = UINT32_MAX >> (32 - MASKBITS);
1943 a = arg1 & mask;
1944 b = arg2 & mask;
1945 d = word_reverse(1 + word_reverse(a | ~b));
1946 return (arg1 & ~mask) | (d & b);
1949 uint32_t helper_cntlsw32 (uint32_t val)
1951 if (val & 0x80000000)
1952 return clz32(~val);
1953 else
1954 return clz32(val);
1957 uint32_t helper_cntlzw32 (uint32_t val)
1959 return clz32(val);
1962 /* Single-precision floating-point conversions */
1963 static always_inline uint32_t efscfsi (uint32_t val)
1965 CPU_FloatU u;
1967 u.f = int32_to_float32(val, &env->spe_status);
1969 return u.l;
1972 static always_inline uint32_t efscfui (uint32_t val)
1974 CPU_FloatU u;
1976 u.f = uint32_to_float32(val, &env->spe_status);
1978 return u.l;
1981 static always_inline int32_t efsctsi (uint32_t val)
1983 CPU_FloatU u;
1985 u.l = val;
1986 /* NaNs are not treated the way IEEE 754 specifies */
1987 if (unlikely(isnan(u.f)))
1988 return 0;
1990 return float32_to_int32(u.f, &env->spe_status);
1993 static always_inline uint32_t efsctui (uint32_t val)
1995 CPU_FloatU u;
1997 u.l = val;
1998 /* NaNs are not treated the way IEEE 754 specifies */
1999 if (unlikely(isnan(u.f)))
2000 return 0;
2002 return float32_to_uint32(u.f, &env->spe_status);
2005 static always_inline uint32_t efsctsiz (uint32_t val)
2007 CPU_FloatU u;
2009 u.l = val;
2010 /* NaNs are not treated the way IEEE 754 specifies */
2011 if (unlikely(isnan(u.f)))
2012 return 0;
2014 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
2017 static always_inline uint32_t efsctuiz (uint32_t val)
2019 CPU_FloatU u;
2021 u.l = val;
2022 /* NaNs are not treated the way IEEE 754 specifies */
2023 if (unlikely(isnan(u.f)))
2024 return 0;
2026 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
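/* Fractional conversions: the *cfsf/*cfuf and *ctsf/*ctuf helpers below
 * treat the fractional formats as the matching integer scaled by 2^32,
 * dividing by 1ULL << 32 when converting from fractional and multiplying
 * by it before the final integer conversion. */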
2029 static always_inline uint32_t efscfsf (uint32_t val)
2031 CPU_FloatU u;
2032 float32 tmp;
2034 u.f = int32_to_float32(val, &env->spe_status);
2035 tmp = int64_to_float32(1ULL << 32, &env->spe_status);
2036 u.f = float32_div(u.f, tmp, &env->spe_status);
2038 return u.l;
2041 static always_inline uint32_t efscfuf (uint32_t val)
2043 CPU_FloatU u;
2044 float32 tmp;
2046 u.f = uint32_to_float32(val, &env->spe_status);
2047 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2048 u.f = float32_div(u.f, tmp, &env->spe_status);
2050 return u.l;
2053 static always_inline uint32_t efsctsf (uint32_t val)
2055 CPU_FloatU u;
2056 float32 tmp;
2058 u.l = val;
2059 /* NaNs are not treated the way IEEE 754 specifies */
2060 if (unlikely(isnan(u.f)))
2061 return 0;
2062 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2063 u.f = float32_mul(u.f, tmp, &env->spe_status);
2065 return float32_to_int32(u.f, &env->spe_status);
2068 static always_inline uint32_t efsctuf (uint32_t val)
2070 CPU_FloatU u;
2071 float32 tmp;
2073 u.l = val;
2074 /* NaNs are not treated the way IEEE 754 specifies */
2075 if (unlikely(isnan(u.f)))
2076 return 0;
2077 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2078 u.f = float32_mul(u.f, tmp, &env->spe_status);
2080 return float32_to_uint32(u.f, &env->spe_status);
2083 #define HELPER_SPE_SINGLE_CONV(name) \
2084 uint32_t helper_e##name (uint32_t val) \
2086 return e##name(val); \
2088 /* efscfsi */
2089 HELPER_SPE_SINGLE_CONV(fscfsi);
2090 /* efscfui */
2091 HELPER_SPE_SINGLE_CONV(fscfui);
2092 /* efscfuf */
2093 HELPER_SPE_SINGLE_CONV(fscfuf);
2094 /* efscfsf */
2095 HELPER_SPE_SINGLE_CONV(fscfsf);
2096 /* efsctsi */
2097 HELPER_SPE_SINGLE_CONV(fsctsi);
2098 /* efsctui */
2099 HELPER_SPE_SINGLE_CONV(fsctui);
2100 /* efsctsiz */
2101 HELPER_SPE_SINGLE_CONV(fsctsiz);
2102 /* efsctuiz */
2103 HELPER_SPE_SINGLE_CONV(fsctuiz);
2104 /* efsctsf */
2105 HELPER_SPE_SINGLE_CONV(fsctsf);
2106 /* efsctuf */
2107 HELPER_SPE_SINGLE_CONV(fsctuf);
2109 #define HELPER_SPE_VECTOR_CONV(name) \
2110 uint64_t helper_ev##name (uint64_t val) \
2112 return ((uint64_t)e##name(val >> 32) << 32) | \
2113 (uint64_t)e##name(val); \
2115 /* evfscfsi */
2116 HELPER_SPE_VECTOR_CONV(fscfsi);
2117 /* evfscfui */
2118 HELPER_SPE_VECTOR_CONV(fscfui);
2119 /* evfscfuf */
2120 HELPER_SPE_VECTOR_CONV(fscfuf);
2121 /* evfscfsf */
2122 HELPER_SPE_VECTOR_CONV(fscfsf);
2123 /* evfsctsi */
2124 HELPER_SPE_VECTOR_CONV(fsctsi);
2125 /* evfsctui */
2126 HELPER_SPE_VECTOR_CONV(fsctui);
2127 /* evfsctsiz */
2128 HELPER_SPE_VECTOR_CONV(fsctsiz);
2129 /* evfsctuiz */
2130 HELPER_SPE_VECTOR_CONV(fsctuiz);
2131 /* evfsctsf */
2132 HELPER_SPE_VECTOR_CONV(fsctsf);
2133 /* evfsctuf */
2134 HELPER_SPE_VECTOR_CONV(fsctuf);
/* Single-precision floating-point arithmetic */
static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                         \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
            (uint64_t)e##name(op1, op2);                                      \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
/* Single-precision floating-point comparisons */
static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0;
}

static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4;
}

static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->spe_status) ? 4 : 0;
}

static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststlt(op1, op2);
}

static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststgt(op1, op2);
}

static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststeq(op1, op2);
}
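
/* The efstst* helpers above implement the non-signalling "test" compares;
 * the efscmp* variants are meant to treat special values (NaNs,
 * infinities) differently, but as the XXX notes say they currently just
 * fall through to the test versions.
 */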
#define HELPER_SINGLE_SPE_CMP(name)                                           \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2) << 2;                                            \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);

static always_inline uint32_t evcmp_merge (int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
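
/* evcmp_merge packs the per-element results into a CR-field-style nibble:
 * bit 3 = high element, bit 2 = low element, bit 1 = either ("any"),
 * bit 0 = both ("all").  For example, t0 = 1, t1 = 0 yields 0xA (0b1010).
 */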
#define HELPER_VECTOR_SPE_CMP(name)                                           \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
/* Double-precision floating-point conversions */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->spe_status);

    return u.ll;
}

uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->spe_status);

    return u.ll;
}

uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->spe_status);

    return u.ll;
}

uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->spe_status);

    return u.ll;
}

uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(isnan(u.d)))
        return 0;

    return float64_to_int32(u.d, &env->spe_status);
}

uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(isnan(u.d)))
        return 0;

    return float64_to_uint32(u.d, &env->spe_status);
}

uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(isnan(u.d)))
        return 0;

    return float64_to_int32_round_to_zero(u.d, &env->spe_status);
}

uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(isnan(u.d)))
        return 0;

    return float64_to_int64_round_to_zero(u.d, &env->spe_status);
}

uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(isnan(u.d)))
        return 0;

    return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
}

uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(isnan(u.d)))
        return 0;

    return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
}

uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->spe_status);
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_div(u.d, tmp, &env->spe_status);

    return u.ll;
}

uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->spe_status);
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_div(u.d, tmp, &env->spe_status);

    return u.ll;
}

uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(isnan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_mul(u.d, tmp, &env->spe_status);

    return float64_to_int32(u.d, &env->spe_status);
}

uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 specifies */
    if (unlikely(isnan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_mul(u.d, tmp, &env->spe_status);

    return float64_to_uint32(u.d, &env->spe_status);
}

uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->spe_status);

    return u2.l;
}

uint64_t helper_efdcfs (uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->spe_status);

    return u2.ll;
}
/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

/* Double-precision floating-point comparisons */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0;
}

uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4;
}

uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0;
}

uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(op1, op2);
}

uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(op1, op2);
}

uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(op1, op2);
}
/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* Try to fill the TLB; raise an exception on error.  If retaddr is NULL,
   the function was called from C code (i.e. not from generated code or
   from helper.c). */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we
                   have a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env, env->exception_index, env->error_code);
    }
    env = saved_env;
}
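
/* Rough sketch of the flow above: when the fault comes from generated code,
 * retaddr is a host return address inside a translation block; tb_find_pc()
 * maps it back to the TB and cpu_restore_state() rebuilds the guest CPU
 * state at the faulting instruction before the MMU exception is raised.
 * When retaddr is NULL the fault came from C code and no restoration is
 * done.
 */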
/* Software-driven TLB management */
/* PowerPC 602/603 software TLB load instructions helpers */
static void helper_load_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
                " PTE1 " ADDRX " way %d\n",
                __func__, new_EPN, EPN, CMP, RPN, way);
    }
#endif
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_load_6xx_tlbd (target_ulong EPN)
{
    helper_load_6xx_tlb(EPN, 0);
}

void helper_load_6xx_tlbi (target_ulong EPN)
{
    helper_load_6xx_tlb(EPN, 1);
}

/* PowerPC 74xx software TLB load instructions helpers */
static void helper_load_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
                " PTE1 " ADDRX " way %d\n",
                __func__, new_EPN, EPN, CMP, RPN, way);
    }
#endif
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_load_74xx_tlbd (target_ulong EPN)
{
    helper_load_74xx_tlb(EPN, 0);
}

void helper_load_74xx_tlbi (target_ulong EPN)
{
    helper_load_74xx_tlb(EPN, 1);
}
static always_inline target_ulong booke_tlb_to_page_size (int size)
{
    return 1024 << (2 * size);
}
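
/* The encoding is 1 KB * 4^size, i.e. 0 -> 1 KB, 1 -> 4 KB, 2 -> 16 KB,
 * ... 7 -> 16 MB, and so on; booke_page_size_to_tlb() below is the
 * inverse mapping.
 */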
static always_inline int booke_page_size_to_tlb (target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}
/* Helpers for 4xx TLB management */
void do_4xx_tlbre_lo (void)
{
    ppcemb_tlb_t *tlb;
    int size;

    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    T0 = tlb->EPN;
    if (tlb->prot & PAGE_VALID)
        T0 |= 0x400;
    size = booke_page_size_to_tlb(tlb->size);
    if (size < 0 || size > 0x7)
        size = 1;
    T0 |= size << 7;
    env->spr[SPR_40x_PID] = tlb->PID;
}

void do_4xx_tlbre_hi (void)
{
    ppcemb_tlb_t *tlb;

    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    T0 = tlb->RPN;
    if (tlb->prot & PAGE_EXEC)
        T0 |= 0x200;
    if (tlb->prot & PAGE_WRITE)
        T0 |= 0x100;
}

void do_4xx_tlbwe_hi (void)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s T0 " TDX " T1 " TDX "\n", __func__, T0, T1);
    }
#endif
    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    /* Invalidate the previous TLB entry (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
#if defined (DEBUG_SOFTWARE_TLB)
        if (loglevel != 0) {
            fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
                    " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
        }
#endif
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
    tlb->size = booke_tlb_to_page_size((T1 >> 7) & 0x7);
    /* We cannot handle TLB sizes < TARGET_PAGE_SIZE.
     * If this ever occurs, use the ppcemb target instead
     * of the ppc or ppc64 one.
     */
    if ((T1 & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "is not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((T1 >> 7) & 0x7));
    }
    tlb->EPN = T1 & ~(tlb->size - 1);
    if (T1 & 0x40)
        tlb->prot |= PAGE_VALID;
    else
        tlb->prot &= ~PAGE_VALID;
    if (T1 & 0x20) {
        /* XXX: TO BE FIXED */
        cpu_abort(env, "Little-endian TLB entries are not supported yet\n");
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    tlb->attr = T1 & 0xFF;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)T0, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    }
#endif
    /* Invalidate the new TLB entry (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
#if defined (DEBUG_SOFTWARE_TLB)
        if (loglevel != 0) {
            fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
                    " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
        }
#endif
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
}
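
/* As decoded above, the TLBHI word in T1 carries: the page size code in
 * bits 9..7 (see booke_tlb_to_page_size), the valid bit at 0x40, the
 * little-endian bit at 0x20 (currently rejected), the low 8 bits copied
 * into tlb->attr, and the EPN taken from T1 aligned down to the page size.
 * This is a summary of the code above, not a quote from the 40x manuals.
 */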
void do_4xx_tlbwe_lo (void)
{
    ppcemb_tlb_t *tlb;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s T0 " TDX " T1 " TDX "\n", __func__, T0, T1);
    }
#endif
    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    tlb->RPN = T1 & 0xFFFFFC00;
    tlb->prot = PAGE_READ;
    if (T1 & 0x200)
        tlb->prot |= PAGE_EXEC;
    if (T1 & 0x100)
        tlb->prot |= PAGE_WRITE;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)T0, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    }
#endif
}
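
/* Likewise, the TLBLO word in T1 supplies the RPN in bits 31..10, execute
 * permission at 0x200 and write permission at 0x100; read permission is
 * always granted.
 */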
/* PowerPC 440 TLB management */
void do_440_tlbwe (int word)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s word %d T0 " TDX " T1 " TDX "\n",
                __func__, word, T0, T1);
    }
#endif
    do_flush_tlbs = 0;
    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = T1 & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((T1 >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (T1 >> 8) & 1;
        if (T1 & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = T1 & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (T1 & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (T1 & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (T1 & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (T1 & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (T1 & 0x8)
            tlb->prot |= PAGE_READ;
        if (T1 & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (T1 & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}
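
/* As the switch above shows, the 440 TLB entry is written one word at a
 * time: word 0 sets the EPN, page size, one attribute bit and the valid
 * bit (plus the PID from MMUCR), word 1 sets the RPN, and word 2 sets the
 * remaining attributes and two R/W/X permission sets (one of them stored
 * shifted left by 4 in tlb->prot).  Changes that could leave stale
 * translations (EPN change, size increase, validity cleared, RPN change
 * on a valid entry) trigger a TLB flush.
 */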
void do_440_tlbre (int word)
{
    ppcemb_tlb_t *tlb;
    int size;

    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        T0 = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;
        T0 |= size << 4;
        if (tlb->attr & 0x1)
            T0 |= 0x100;
        if (tlb->prot & PAGE_VALID)
            T0 |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        T0 = tlb->RPN;
        break;
    case 2:
        T0 = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            T0 |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            T0 |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            T0 |= 0x4;
        if (tlb->prot & PAGE_READ)
            T0 |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            T0 |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            T0 |= 0x20;
        break;
    }
}

#endif /* !CONFIG_USER_ONLY */