target-ppc: convert load/store string instructions to TCG
[qemu/qemu-JZ.git] / target-ppc / op_helper.c
blob 75fbefbb969fe3f2767aad3cc022272b713fd526
1 /*
2 * PowerPC emulation helpers for qemu.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "exec.h"
21 #include "host-utils.h"
22 #include "helper.h"
24 #include "helper_regs.h"
25 #include "op_helper.h"
27 #define MEMSUFFIX _raw
28 #include "op_helper.h"
29 #include "op_helper_mem.h"
30 #if !defined(CONFIG_USER_ONLY)
31 #define MEMSUFFIX _user
32 #include "op_helper.h"
33 #include "op_helper_mem.h"
34 #define MEMSUFFIX _kernel
35 #include "op_helper.h"
36 #include "op_helper_mem.h"
37 #define MEMSUFFIX _hypv
38 #include "op_helper.h"
39 #include "op_helper_mem.h"
40 #endif
42 //#define DEBUG_OP
43 //#define DEBUG_EXCEPTIONS
44 //#define DEBUG_SOFTWARE_TLB
46 /*****************************************************************************/
47 /* Exceptions processing helpers */
49 void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
51 raise_exception_err(env, exception, error_code);
54 void helper_raise_debug (void)
56 raise_exception(env, EXCP_DEBUG);
59 /*****************************************************************************/
60 /* Registers load and stores */
61 target_ulong helper_load_cr (void)
63 return (env->crf[0] << 28) |
64 (env->crf[1] << 24) |
65 (env->crf[2] << 20) |
66 (env->crf[3] << 16) |
67 (env->crf[4] << 12) |
68 (env->crf[5] << 8) |
69 (env->crf[6] << 4) |
70 (env->crf[7] << 0);
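/* For example, with crf[0] = 0x8 (LT) and crf[7] = 0x2 (EQ) and every other
 * field clear, the packed value returned above is 0x80000002: crf[0] lands
 * in bits 31:28 and crf[7] in bits 3:0.
 */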
73 void helper_store_cr (target_ulong val, uint32_t mask)
75 int i, sh;
77 for (i = 0, sh = 7; i < 8; i++, sh--) {
78 if (mask & (1 << sh))
79 env->crf[i] = (val >> (sh * 4)) & 0xFUL;
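/* The mask selects CR fields MSB-first: mask bit 7 corresponds to crf[0]
 * and mask bit 0 to crf[7]. E.g. helper_store_cr(0x40000000, 0x80) updates
 * only crf[0], setting it to 0x4 (GT).
 */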
83 #if defined(TARGET_PPC64)
84 void do_store_pri (int prio)
86 env->spr[SPR_PPR] &= ~0x001C000000000000ULL;
87 env->spr[SPR_PPR] |= ((uint64_t)prio & 0x7) << 50;
89 #endif
91 target_ulong ppc_load_dump_spr (int sprn)
93 if (loglevel != 0) {
94 fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
95 sprn, sprn, env->spr[sprn]);
98 return env->spr[sprn];
101 void ppc_store_dump_spr (int sprn, target_ulong val)
103 if (loglevel != 0) {
104 fprintf(logfile, "Write SPR %d %03x => " ADDRX " <= " ADDRX "\n",
105 sprn, sprn, env->spr[sprn], val);
107 env->spr[sprn] = val;
110 /*****************************************************************************/
111 /* Memory load and stores */
113 static always_inline target_ulong get_addr(target_ulong addr)
115 #if defined(TARGET_PPC64)
116 if (msr_sf)
117 return addr;
118 else
119 #endif
120 return (uint32_t)addr;
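/* In 64-bit mode (MSR[SF] set) the effective address is used unchanged;
 * otherwise it is truncated to 32 bits, e.g. get_addr(0x123456789ULL)
 * yields 0x23456789 when msr_sf is clear.
 */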
123 void helper_lmw (target_ulong addr, uint32_t reg)
125 #ifdef CONFIG_USER_ONLY
126 #define ldfun ldl_raw
127 #else
128 int (*ldfun)(target_ulong);
130 switch (env->mmu_idx) {
131 default:
132 case 0: ldfun = ldl_user;
133 break;
134 case 1: ldfun = ldl_kernel;
135 break;
136 case 2: ldfun = ldl_hypv;
137 break;
139 #endif
140 for (; reg < 32; reg++, addr += 4) {
141 if (msr_le)
142 env->gpr[reg] = bswap32(ldfun(get_addr(addr)));
143 else
144 env->gpr[reg] = ldfun(get_addr(addr));
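/* lmw fills GPRs reg..31 from consecutive words: e.g. reg = 29 loads r29,
 * r30 and r31 from addr, addr+4 and addr+8. In little-endian mode each
 * word read by the big-endian ldl access is byte-swapped.
 */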
148 void helper_stmw (target_ulong addr, uint32_t reg)
150 #ifdef CONFIG_USER_ONLY
151 #define stfun stl_raw
152 #else
153 void (*stfun)(target_ulong, int);
155 switch (env->mmu_idx) {
156 default:
157 case 0: stfun = stl_user;
158 break;
159 case 1: stfun = stl_kernel;
160 break;
161 case 2: stfun = stl_hypv;
162 break;
164 #endif
165 for (; reg < 32; reg++, addr += 4) {
166 if (msr_le)
167 stfun(get_addr(addr), bswap32((uint32_t)env->gpr[reg]));
168 else
169 stfun(get_addr(addr), (uint32_t)env->gpr[reg]);
173 void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
175 int sh;
176 #ifdef CONFIG_USER_ONLY
177 #define ldfunl ldl_raw
178 #define ldfunb ldub_raw
179 #else
180 int (*ldfunl)(target_ulong);
181 int (*ldfunb)(target_ulong);
183 switch (env->mmu_idx) {
184 default:
185 case 0:
186 ldfunl = ldl_user;
187 ldfunb = ldub_user;
188 break;
189 case 1:
190 ldfunl = ldl_kernel;
191 ldfunb = ldub_kernel;
192 break;
193 case 2:
194 ldfunl = ldl_hypv;
195 ldfunb = ldub_hypv;
196 break;
198 #endif
199 for (; nb > 3; nb -= 4, addr += 4) {
200 env->gpr[reg] = ldfunl(get_addr(addr));
201 reg = (reg + 1) % 32;
203 if (unlikely(nb > 0)) {
204 env->gpr[reg] = 0;
205 for (sh = 24; nb > 0; nb--, addr++, sh -= 8) {
206 env->gpr[reg] |= ldfunb(get_addr(addr)) << sh;
210 /* PPC32 specification says we must generate an exception if
211 * rA is in the range of registers to be loaded.
212 * On the other hand, IBM says this is valid, but rA won't be loaded.
213 * For now, I'll follow the spec...
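 * For example, lswx with RT = r5 and an XER byte count of 9 transfers
 * 9 bytes into r5, r6 and the most significant byte of r7; the check in
 * helper_lswx() below is meant to raise a program exception whenever rA
 * or rB falls inside that destination range.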
215 void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
217 if (likely(xer_bc != 0)) {
218 if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
219 (reg < rb && (reg + xer_bc) > rb))) {
220 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
221 POWERPC_EXCP_INVAL |
222 POWERPC_EXCP_INVAL_LSWX);
223 } else {
224 helper_lsw(addr, xer_bc, reg);
229 void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
231 int sh;
232 #ifdef CONFIG_USER_ONLY
233 #define stfunl stl_raw
234 #define stfunb stb_raw
235 #else
236 void (*stfunl)(target_ulong, int);
237 void (*stfunb)(target_ulong, int);
239 switch (env->mmu_idx) {
240 default:
241 case 0:
242 stfunl = stl_user;
243 stfunb = stb_user;
244 break;
245 case 1:
246 stfunl = stl_kernel;
247 stfunb = stb_kernel;
248 break;
249 case 2:
250 stfunl = stl_hypv;
251 stfunb = stb_hypv;
252 break;
254 #endif
256 for (; nb > 3; nb -= 4, addr += 4) {
257 stfunl(get_addr(addr), env->gpr[reg]);
258 reg = (reg + 1) % 32;
260 if (unlikely(nb > 0)) {
261 for (sh = 24; nb > 0; nb--, addr++, sh -= 8)
262 stfunb(get_addr(addr), (env->gpr[reg] >> sh) & 0xFF);
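/* E.g. stsw with nb = 6 stores the full word from gpr[reg] followed by the
 * two most significant bytes of gpr[reg + 1]; residual bytes are always
 * taken MSB-first (sh = 24, 16, 8).
 */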
266 static void do_dcbz(target_ulong addr, int dcache_line_size)
268 target_long mask = get_addr(~(dcache_line_size - 1));
269 int i;
270 #ifdef CONFIG_USER_ONLY
271 #define stfun stl_raw
272 #else
273 void (*stfun)(target_ulong, int);
275 switch (env->mmu_idx) {
276 default:
277 case 0: stfun = stl_user;
278 break;
279 case 1: stfun = stl_kernel;
280 break;
281 case 2: stfun = stl_hypv;
282 break;
284 #endif
285 addr &= mask;
286 for (i = 0 ; i < dcache_line_size ; i += 4) {
287 stfun(addr + i , 0);
289 if ((env->reserve & mask) == addr)
290 env->reserve = (target_ulong)-1ULL;
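/* The line is zeroed word by word after aligning the address down to a
 * cache-line boundary: with a 32-byte line, dcbz on address 0x1008 clears
 * bytes 0x1000..0x101F. A lwarx/ldarx reservation on that same line is
 * discarded as well.
 */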
293 void helper_dcbz(target_ulong addr)
295 do_dcbz(addr, env->dcache_line_size);
298 void helper_dcbz_970(target_ulong addr)
300 if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
301 do_dcbz(addr, 32);
302 else
303 do_dcbz(addr, env->dcache_line_size);
306 void helper_icbi(target_ulong addr)
308 uint32_t tmp;
310 addr = get_addr(addr & ~(env->dcache_line_size - 1));
311 /* Invalidate one cache line :
312 * PowerPC specification says this is to be treated like a load
313 * (not a fetch) by the MMU. To be sure it will be so,
314 * do the load "by hand".
316 #ifdef CONFIG_USER_ONLY
317 tmp = ldl_raw(addr);
318 #else
319 switch (env->mmu_idx) {
320 default:
321 case 0: tmp = ldl_user(addr);
322 break;
323 case 1: tmp = ldl_kernel(addr);
324 break;
325 case 2: tmp = ldl_hypv(addr);
326 break;
328 #endif
329 tb_invalidate_page_range(addr, addr + env->icache_line_size);
332 /*****************************************************************************/
333 /* Fixed point operations helpers */
334 #if defined(TARGET_PPC64)
336 /* multiply high word */
337 uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
339 uint64_t tl, th;
341 muls64(&tl, &th, arg1, arg2);
342 return th;
345 /* multiply high word unsigned */
346 uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
348 uint64_t tl, th;
350 mulu64(&tl, &th, arg1, arg2);
351 return th;
354 uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
356 int64_t th;
357 uint64_t tl;
359 muls64(&tl, (uint64_t *)&th, arg1, arg2);
360 /* If th != 0 && th != -1, then we had an overflow */
361 if (likely((uint64_t)(th + 1) <= 1)) {
362 env->xer &= ~(1 << XER_OV);
363 } else {
364 env->xer |= (1 << XER_OV) | (1 << XER_SO);
366 return (int64_t)tl;
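/* E.g. 0x4000000000000000 * 4 gives a 128-bit product with th = 1, so
 * XER[OV] and XER[SO] are set; (-3) * 5 gives th = -1 (the sign extension
 * of tl = -15), so OV is cleared.
 */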
368 #endif
370 target_ulong helper_cntlzw (target_ulong t)
372 return clz32(t);
375 #if defined(TARGET_PPC64)
376 target_ulong helper_cntlzd (target_ulong t)
378 return clz64(t);
380 #endif
382 /* shift right arithmetic helper */
383 target_ulong helper_sraw (target_ulong value, target_ulong shift)
385 int32_t ret;
387 if (likely(!(shift & 0x20))) {
388 if (likely((uint32_t)shift != 0)) {
389 shift &= 0x1f;
390 ret = (int32_t)value >> shift;
391 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
392 env->xer &= ~(1 << XER_CA);
393 } else {
394 env->xer |= (1 << XER_CA);
396 } else {
397 ret = (int32_t)value;
398 env->xer &= ~(1 << XER_CA);
400 } else {
401 ret = (int32_t)value >> 31;
402 if (ret) {
403 env->xer |= (1 << XER_CA);
404 } else {
405 env->xer &= ~(1 << XER_CA);
408 return (target_long)ret;
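/* E.g. sraw(0xFFFFFFF5, 2): value = -11, result = -3, and XER[CA] is set
 * because non-zero bits were shifted out of a negative operand;
 * sraw(0xFFFFFFF4, 2) also returns -3 but clears CA since the discarded
 * bits were all zero.
 */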
411 #if defined(TARGET_PPC64)
412 target_ulong helper_srad (target_ulong value, target_ulong shift)
414 int64_t ret;
416 if (likely(!(shift & 0x40))) {
417 if (likely((uint64_t)shift != 0)) {
418 shift &= 0x3f;
419 ret = (int64_t)value >> shift;
420             if (likely(ret >= 0 || (value & (((uint64_t)1 << shift) - 1)) == 0)) {
421 env->xer &= ~(1 << XER_CA);
422 } else {
423 env->xer |= (1 << XER_CA);
425 } else {
426 ret = (int64_t)value;
427 env->xer &= ~(1 << XER_CA);
429 } else {
430 ret = (int64_t)value >> 63;
431 if (ret) {
432 env->xer |= (1 << XER_CA);
433 } else {
434 env->xer &= ~(1 << XER_CA);
437 return ret;
439 #endif
441 target_ulong helper_popcntb (target_ulong val)
443 val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
444 val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
445 val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
446 return val;
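/* This is the usual SWAR reduction applied per byte: e.g. val = 0x01020304
 * (bytes with 1, 1, 2 and 1 bits set) yields 0x01010201, and 0x000000FF
 * yields 0x00000008.
 */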
449 #if defined(TARGET_PPC64)
450 target_ulong helper_popcntb_64 (target_ulong val)
452 val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
453 val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
454 val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
455 return val;
457 #endif
459 /*****************************************************************************/
460 /* Floating point operations helpers */
461 uint64_t helper_float32_to_float64(uint32_t arg)
463 CPU_FloatU f;
464 CPU_DoubleU d;
465 f.l = arg;
466 d.d = float32_to_float64(f.f, &env->fp_status);
467 return d.ll;
470 uint32_t helper_float64_to_float32(uint64_t arg)
472 CPU_FloatU f;
473 CPU_DoubleU d;
474 d.ll = arg;
475 f.f = float64_to_float32(d.d, &env->fp_status);
476 return f.l;
479 static always_inline int fpisneg (float64 d)
481 CPU_DoubleU u;
483 u.d = d;
485 return u.ll >> 63 != 0;
488 static always_inline int isden (float64 d)
490 CPU_DoubleU u;
492 u.d = d;
494 return ((u.ll >> 52) & 0x7FF) == 0;
497 static always_inline int iszero (float64 d)
499 CPU_DoubleU u;
501 u.d = d;
503 return (u.ll & ~0x8000000000000000ULL) == 0;
506 static always_inline int isinfinity (float64 d)
508 CPU_DoubleU u;
510 u.d = d;
512 return ((u.ll >> 52) & 0x7FF) == 0x7FF &&
513 (u.ll & 0x000FFFFFFFFFFFFFULL) == 0;
516 #ifdef CONFIG_SOFTFLOAT
517 static always_inline int isfinite (float64 d)
519 CPU_DoubleU u;
521 u.d = d;
523 return (((u.ll >> 52) & 0x7FF) != 0x7FF);
526 static always_inline int isnormal (float64 d)
528 CPU_DoubleU u;
530 u.d = d;
532 uint32_t exp = (u.ll >> 52) & 0x7FF;
533 return ((0 < exp) && (exp < 0x7FF));
535 #endif
537 uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
539 CPU_DoubleU farg;
540 int isneg;
541 int ret;
542 farg.ll = arg;
543 isneg = fpisneg(farg.d);
544 if (unlikely(float64_is_nan(farg.d))) {
545 if (float64_is_signaling_nan(farg.d)) {
546 /* Signaling NaN: flags are undefined */
547 ret = 0x00;
548 } else {
549 /* Quiet NaN */
550 ret = 0x11;
552 } else if (unlikely(isinfinity(farg.d))) {
553 /* +/- infinity */
554 if (isneg)
555 ret = 0x09;
556 else
557 ret = 0x05;
558 } else {
559 if (iszero(farg.d)) {
560 /* +/- zero */
561 if (isneg)
562 ret = 0x12;
563 else
564 ret = 0x02;
565 } else {
566 if (isden(farg.d)) {
567 /* Denormalized numbers */
568 ret = 0x10;
569 } else {
570 /* Normalized numbers */
571 ret = 0x00;
573 if (isneg) {
574 ret |= 0x08;
575 } else {
576 ret |= 0x04;
580 if (set_fprf) {
581 /* We update FPSCR_FPRF */
582 env->fpscr &= ~(0x1F << FPSCR_FPRF);
583 env->fpscr |= ret << FPSCR_FPRF;
585 /* We just need fpcc to update Rc1 */
586 return ret & 0xF;
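/* FPRF class codes produced above (C bit plus FPCC): 0x11 quiet NaN,
 * 0x09/0x05 -/+ infinity, 0x12/0x02 -/+ zero, 0x18/0x14 -/+ denormal,
 * 0x08/0x04 -/+ normalized. The 5-bit code is written to FPSCR[FPRF] when
 * set_fprf is non-zero; only the FPCC nibble is returned for Rc1 handling.
 */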
589 /* Floating-point invalid operations exception */
590 static always_inline uint64_t fload_invalid_op_excp (int op)
592 uint64_t ret = 0;
593 int ve;
595 ve = fpscr_ve;
596 if (op & POWERPC_EXCP_FP_VXSNAN) {
597 /* Operation on signaling NaN */
598 env->fpscr |= 1 << FPSCR_VXSNAN;
600 if (op & POWERPC_EXCP_FP_VXSOFT) {
601 /* Software-defined condition */
602 env->fpscr |= 1 << FPSCR_VXSOFT;
604 switch (op & ~(POWERPC_EXCP_FP_VXSOFT | POWERPC_EXCP_FP_VXSNAN)) {
605 case POWERPC_EXCP_FP_VXISI:
606 /* Magnitude subtraction of infinities */
607 env->fpscr |= 1 << FPSCR_VXISI;
608 goto update_arith;
609 case POWERPC_EXCP_FP_VXIDI:
610 /* Division of infinity by infinity */
611 env->fpscr |= 1 << FPSCR_VXIDI;
612 goto update_arith;
613 case POWERPC_EXCP_FP_VXZDZ:
614 /* Division of zero by zero */
615 env->fpscr |= 1 << FPSCR_VXZDZ;
616 goto update_arith;
617 case POWERPC_EXCP_FP_VXIMZ:
618 /* Multiplication of zero by infinity */
619 env->fpscr |= 1 << FPSCR_VXIMZ;
620 goto update_arith;
621 case POWERPC_EXCP_FP_VXVC:
622 /* Ordered comparison of NaN */
623 env->fpscr |= 1 << FPSCR_VXVC;
624 env->fpscr &= ~(0xF << FPSCR_FPCC);
625 env->fpscr |= 0x11 << FPSCR_FPCC;
626 /* We must update the target FPR before raising the exception */
627 if (ve != 0) {
628 env->exception_index = POWERPC_EXCP_PROGRAM;
629 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
630 /* Update the floating-point enabled exception summary */
631 env->fpscr |= 1 << FPSCR_FEX;
632             /* Exception is deferred */
633 ve = 0;
635 break;
636 case POWERPC_EXCP_FP_VXSQRT:
637 /* Square root of a negative number */
638 env->fpscr |= 1 << FPSCR_VXSQRT;
639 update_arith:
640 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
641 if (ve == 0) {
642 /* Set the result to quiet NaN */
643 ret = UINT64_MAX;
644 env->fpscr &= ~(0xF << FPSCR_FPCC);
645 env->fpscr |= 0x11 << FPSCR_FPCC;
647 break;
648 case POWERPC_EXCP_FP_VXCVI:
649 /* Invalid conversion */
650 env->fpscr |= 1 << FPSCR_VXCVI;
651 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
652 if (ve == 0) {
653 /* Set the result to quiet NaN */
654 ret = UINT64_MAX;
655 env->fpscr &= ~(0xF << FPSCR_FPCC);
656 env->fpscr |= 0x11 << FPSCR_FPCC;
658 break;
660 /* Update the floating-point invalid operation summary */
661 env->fpscr |= 1 << FPSCR_VX;
662 /* Update the floating-point exception summary */
663 env->fpscr |= 1 << FPSCR_FX;
664 if (ve != 0) {
665 /* Update the floating-point enabled exception summary */
666 env->fpscr |= 1 << FPSCR_FEX;
667 if (msr_fe0 != 0 || msr_fe1 != 0)
668 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
670 return ret;
673 static always_inline uint64_t float_zero_divide_excp (uint64_t arg1, uint64_t arg2)
675 env->fpscr |= 1 << FPSCR_ZX;
676 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
677 /* Update the floating-point exception summary */
678 env->fpscr |= 1 << FPSCR_FX;
679 if (fpscr_ze != 0) {
680 /* Update the floating-point enabled exception summary */
681 env->fpscr |= 1 << FPSCR_FEX;
682 if (msr_fe0 != 0 || msr_fe1 != 0) {
683 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
684 POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
686 } else {
687 /* Set the result to infinity */
688 arg1 = ((arg1 ^ arg2) & 0x8000000000000000ULL);
689 arg1 |= 0x7FFULL << 52;
691 return arg1;
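/* When the exception is not enabled, the result built above is a correctly
 * signed infinity: sign = sign(arg1) XOR sign(arg2), exponent field 0x7FF,
 * zero mantissa. E.g. dividing 1.0 (0x3FF0000000000000) by -0.0
 * (0x8000000000000000) returns 0xFFF0000000000000, i.e. -infinity.
 */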
694 static always_inline void float_overflow_excp (void)
696 env->fpscr |= 1 << FPSCR_OX;
697 /* Update the floating-point exception summary */
698 env->fpscr |= 1 << FPSCR_FX;
699 if (fpscr_oe != 0) {
700 /* XXX: should adjust the result */
701 /* Update the floating-point enabled exception summary */
702 env->fpscr |= 1 << FPSCR_FEX;
703 /* We must update the target FPR before raising the exception */
704 env->exception_index = POWERPC_EXCP_PROGRAM;
705 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
706 } else {
707 env->fpscr |= 1 << FPSCR_XX;
708 env->fpscr |= 1 << FPSCR_FI;
712 static always_inline void float_underflow_excp (void)
714 env->fpscr |= 1 << FPSCR_UX;
715 /* Update the floating-point exception summary */
716 env->fpscr |= 1 << FPSCR_FX;
717 if (fpscr_ue != 0) {
718 /* XXX: should adjust the result */
719 /* Update the floating-point enabled exception summary */
720 env->fpscr |= 1 << FPSCR_FEX;
721 /* We must update the target FPR before raising the exception */
722 env->exception_index = POWERPC_EXCP_PROGRAM;
723 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
727 static always_inline void float_inexact_excp (void)
729 env->fpscr |= 1 << FPSCR_XX;
730 /* Update the floating-point exception summary */
731 env->fpscr |= 1 << FPSCR_FX;
732 if (fpscr_xe != 0) {
733 /* Update the floating-point enabled exception summary */
734 env->fpscr |= 1 << FPSCR_FEX;
735 /* We must update the target FPR before raising the exception */
736 env->exception_index = POWERPC_EXCP_PROGRAM;
737 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
741 static always_inline void fpscr_set_rounding_mode (void)
743 int rnd_type;
745 /* Set rounding mode */
746 switch (fpscr_rn) {
747 case 0:
748 /* Best approximation (round to nearest) */
749 rnd_type = float_round_nearest_even;
750 break;
751 case 1:
752 /* Smaller magnitude (round toward zero) */
753 rnd_type = float_round_to_zero;
754 break;
755 case 2:
756         /* Round toward +infinity */
757 rnd_type = float_round_up;
758 break;
759 default:
760 case 3:
761         /* Round toward -infinity */
762 rnd_type = float_round_down;
763 break;
765 set_float_rounding_mode(rnd_type, &env->fp_status);
768 void helper_fpscr_setbit (uint32_t bit)
770 int prev;
772 prev = (env->fpscr >> bit) & 1;
773 env->fpscr |= 1 << bit;
774 if (prev == 0) {
775 switch (bit) {
776 case FPSCR_VX:
777 env->fpscr |= 1 << FPSCR_FX;
778 if (fpscr_ve)
779 goto raise_ve;
780 case FPSCR_OX:
781 env->fpscr |= 1 << FPSCR_FX;
782 if (fpscr_oe)
783 goto raise_oe;
784 break;
785 case FPSCR_UX:
786 env->fpscr |= 1 << FPSCR_FX;
787 if (fpscr_ue)
788 goto raise_ue;
789 break;
790 case FPSCR_ZX:
791 env->fpscr |= 1 << FPSCR_FX;
792 if (fpscr_ze)
793 goto raise_ze;
794 break;
795 case FPSCR_XX:
796 env->fpscr |= 1 << FPSCR_FX;
797 if (fpscr_xe)
798 goto raise_xe;
799 break;
800 case FPSCR_VXSNAN:
801 case FPSCR_VXISI:
802 case FPSCR_VXIDI:
803 case FPSCR_VXZDZ:
804 case FPSCR_VXIMZ:
805 case FPSCR_VXVC:
806 case FPSCR_VXSOFT:
807 case FPSCR_VXSQRT:
808 case FPSCR_VXCVI:
809 env->fpscr |= 1 << FPSCR_VX;
810 env->fpscr |= 1 << FPSCR_FX;
811 if (fpscr_ve != 0)
812 goto raise_ve;
813 break;
814 case FPSCR_VE:
815 if (fpscr_vx != 0) {
816 raise_ve:
817 env->error_code = POWERPC_EXCP_FP;
818 if (fpscr_vxsnan)
819 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
820 if (fpscr_vxisi)
821 env->error_code |= POWERPC_EXCP_FP_VXISI;
822 if (fpscr_vxidi)
823 env->error_code |= POWERPC_EXCP_FP_VXIDI;
824 if (fpscr_vxzdz)
825 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
826 if (fpscr_vximz)
827 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
828 if (fpscr_vxvc)
829 env->error_code |= POWERPC_EXCP_FP_VXVC;
830 if (fpscr_vxsoft)
831 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
832 if (fpscr_vxsqrt)
833 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
834 if (fpscr_vxcvi)
835 env->error_code |= POWERPC_EXCP_FP_VXCVI;
836 goto raise_excp;
838 break;
839 case FPSCR_OE:
840 if (fpscr_ox != 0) {
841 raise_oe:
842 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
843 goto raise_excp;
845 break;
846 case FPSCR_UE:
847 if (fpscr_ux != 0) {
848 raise_ue:
849 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
850 goto raise_excp;
852 break;
853 case FPSCR_ZE:
854 if (fpscr_zx != 0) {
855 raise_ze:
856 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
857 goto raise_excp;
859 break;
860 case FPSCR_XE:
861 if (fpscr_xx != 0) {
862 raise_xe:
863 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
864 goto raise_excp;
866 break;
867 case FPSCR_RN1:
868 case FPSCR_RN:
869 fpscr_set_rounding_mode();
870 break;
871 default:
872 break;
873 raise_excp:
874 /* Update the floating-point enabled exception summary */
875 env->fpscr |= 1 << FPSCR_FEX;
876 /* We have to update Rc1 before raising the exception */
877 env->exception_index = POWERPC_EXCP_PROGRAM;
878 break;
883 void helper_store_fpscr (uint64_t arg, uint32_t mask)
886 * We use only the 32 LSB of the incoming fpr
888 uint32_t prev, new;
889 int i;
891 prev = env->fpscr;
892 new = (uint32_t)arg;
893 new &= ~0x90000000;
894 new |= prev & 0x90000000;
895 for (i = 0; i < 7; i++) {
896 if (mask & (1 << i)) {
897 env->fpscr &= ~(0xF << (4 * i));
898 env->fpscr |= new & (0xF << (4 * i));
901 /* Update VX and FEX */
902 if (fpscr_ix != 0)
903 env->fpscr |= 1 << FPSCR_VX;
904 else
905 env->fpscr &= ~(1 << FPSCR_VX);
906 if ((fpscr_ex & fpscr_eex) != 0) {
907 env->fpscr |= 1 << FPSCR_FEX;
908 env->exception_index = POWERPC_EXCP_PROGRAM;
909 /* XXX: we should compute it properly */
910 env->error_code = POWERPC_EXCP_FP;
912 else
913 env->fpscr &= ~(1 << FPSCR_FEX);
914 fpscr_set_rounding_mode();
917 void helper_float_check_status (void)
919 #ifdef CONFIG_SOFTFLOAT
920 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
921 (env->error_code & POWERPC_EXCP_FP)) {
922         /* Deferred floating-point exception after target FPR update */
923 if (msr_fe0 != 0 || msr_fe1 != 0)
924 raise_exception_err(env, env->exception_index, env->error_code);
925 } else if (env->fp_status.float_exception_flags & float_flag_overflow) {
926 float_overflow_excp();
927 } else if (env->fp_status.float_exception_flags & float_flag_underflow) {
928 float_underflow_excp();
929 } else if (env->fp_status.float_exception_flags & float_flag_inexact) {
930 float_inexact_excp();
932 #else
933 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
934 (env->error_code & POWERPC_EXCP_FP)) {
935         /* Deferred floating-point exception after target FPR update */
936 if (msr_fe0 != 0 || msr_fe1 != 0)
937 raise_exception_err(env, env->exception_index, env->error_code);
939 RETURN();
940 #endif
943 #ifdef CONFIG_SOFTFLOAT
944 void helper_reset_fpstatus (void)
946 env->fp_status.float_exception_flags = 0;
948 #endif
950 /* fadd - fadd. */
951 uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
953 CPU_DoubleU farg1, farg2;
955 farg1.ll = arg1;
956 farg2.ll = arg2;
957 #if USE_PRECISE_EMULATION
958 if (unlikely(float64_is_signaling_nan(farg1.d) ||
959 float64_is_signaling_nan(farg2.d))) {
960 /* sNaN addition */
961 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
962 } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
963 fpisneg(farg1.d) == fpisneg(farg2.d))) {
964 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
965 } else {
966 /* Magnitude subtraction of infinities */
967         farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
969 #else
970 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
971 #endif
972 return farg1.ll;
975 /* fsub - fsub. */
976 uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
978 CPU_DoubleU farg1, farg2;
980 farg1.ll = arg1;
981 farg2.ll = arg2;
982 #if USE_PRECISE_EMULATION
984 if (unlikely(float64_is_signaling_nan(farg1.d) ||
985 float64_is_signaling_nan(farg2.d))) {
986 /* sNaN subtraction */
987 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
988 } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
989 fpisneg(farg1.d) != fpisneg(farg2.d))) {
990 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
991 } else {
992 /* Magnitude subtraction of infinities */
993 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
996 #else
997 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
998 #endif
999 return farg1.ll;
1002 /* fmul - fmul. */
1003 uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
1005 CPU_DoubleU farg1, farg2;
1007 farg1.ll = arg1;
1008 farg2.ll = arg2;
1009 #if USE_PRECISE_EMULATION
1010 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1011 float64_is_signaling_nan(farg2.d))) {
1012 /* sNaN multiplication */
1013 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1014 } else if (unlikely((isinfinity(farg1.d) && iszero(farg2.d)) ||
1015 (iszero(farg1.d) && isinfinity(farg2.d)))) {
1016 /* Multiplication of zero by infinity */
1017 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1018 } else {
1019 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1022 #else
1023 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1024 #endif
1025 return farg1.ll;
1028 /* fdiv - fdiv. */
1029 uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
1031 CPU_DoubleU farg1, farg2;
1033 farg1.ll = arg1;
1034 farg2.ll = arg2;
1035 #if USE_PRECISE_EMULATION
1036 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1037 float64_is_signaling_nan(farg2.d))) {
1038 /* sNaN division */
1039 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1040 } else if (unlikely(isinfinity(farg1.d) && isinfinity(farg2.d))) {
1041 /* Division of infinity by infinity */
1042 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
1043 } else if (unlikely(iszero(farg2.d))) {
1044 if (iszero(farg1.d)) {
1045 /* Division of zero by zero */
1046             farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
1047 } else {
1048 /* Division by zero */
1049 farg1.ll = float_zero_divide_excp(farg1.d, farg2.d);
1051 } else {
1052 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1054 #else
1055 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1056 #endif
1057 return farg1.ll;
1060 /* fabs */
1061 uint64_t helper_fabs (uint64_t arg)
1063 CPU_DoubleU farg;
1065 farg.ll = arg;
1066 farg.d = float64_abs(farg.d);
1067 return farg.ll;
1070 /* fnabs */
1071 uint64_t helper_fnabs (uint64_t arg)
1073 CPU_DoubleU farg;
1075 farg.ll = arg;
1076 farg.d = float64_abs(farg.d);
1077 farg.d = float64_chs(farg.d);
1078 return farg.ll;
1081 /* fneg */
1082 uint64_t helper_fneg (uint64_t arg)
1084 CPU_DoubleU farg;
1086 farg.ll = arg;
1087 farg.d = float64_chs(farg.d);
1088 return farg.ll;
1091 /* fctiw - fctiw. */
1092 uint64_t helper_fctiw (uint64_t arg)
1094 CPU_DoubleU farg;
1095 farg.ll = arg;
1097 if (unlikely(float64_is_signaling_nan(farg.d))) {
1098 /* sNaN conversion */
1099 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1100 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1101 /* qNan / infinity conversion */
1102 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1103 } else {
1104 farg.ll = float64_to_int32(farg.d, &env->fp_status);
1105 #if USE_PRECISE_EMULATION
1106 /* XXX: higher bits are not supposed to be significant.
1107 * to make tests easier, return the same as a real PowerPC 750
1109 farg.ll |= 0xFFF80000ULL << 32;
1110 #endif
1112 return farg.ll;
1115 /* fctiwz - fctiwz. */
1116 uint64_t helper_fctiwz (uint64_t arg)
1118 CPU_DoubleU farg;
1119 farg.ll = arg;
1121 if (unlikely(float64_is_signaling_nan(farg.d))) {
1122 /* sNaN conversion */
1123 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1124 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1125 /* qNan / infinity conversion */
1126 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1127 } else {
1128 farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1129 #if USE_PRECISE_EMULATION
1130 /* XXX: higher bits are not supposed to be significant.
1131 * to make tests easier, return the same as a real PowerPC 750
1133 farg.ll |= 0xFFF80000ULL << 32;
1134 #endif
1136 return farg.ll;
1139 #if defined(TARGET_PPC64)
1140 /* fcfid - fcfid. */
1141 uint64_t helper_fcfid (uint64_t arg)
1143 CPU_DoubleU farg;
1144 farg.d = int64_to_float64(arg, &env->fp_status);
1145 return farg.ll;
1148 /* fctid - fctid. */
1149 uint64_t helper_fctid (uint64_t arg)
1151 CPU_DoubleU farg;
1152 farg.ll = arg;
1154 if (unlikely(float64_is_signaling_nan(farg.d))) {
1155 /* sNaN conversion */
1156 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1157 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1158 /* qNan / infinity conversion */
1159 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1160 } else {
1161 farg.ll = float64_to_int64(farg.d, &env->fp_status);
1163 return farg.ll;
1166 /* fctidz - fctidz. */
1167 uint64_t helper_fctidz (uint64_t arg)
1169 CPU_DoubleU farg;
1170 farg.ll = arg;
1172 if (unlikely(float64_is_signaling_nan(farg.d))) {
1173 /* sNaN conversion */
1174 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1175 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1176 /* qNan / infinity conversion */
1177 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1178 } else {
1179 farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
1181 return farg.ll;
1184 #endif
1186 static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
1188 CPU_DoubleU farg;
1189 farg.ll = arg;
1191 if (unlikely(float64_is_signaling_nan(farg.d))) {
1192 /* sNaN round */
1193 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1194 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1195 /* qNan / infinity round */
1196 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1197 } else {
1198 set_float_rounding_mode(rounding_mode, &env->fp_status);
1199 farg.ll = float64_round_to_int(farg.d, &env->fp_status);
1200 /* Restore rounding mode from FPSCR */
1201 fpscr_set_rounding_mode();
1203 return farg.ll;
1206 uint64_t helper_frin (uint64_t arg)
1208 return do_fri(arg, float_round_nearest_even);
1211 uint64_t helper_friz (uint64_t arg)
1213 return do_fri(arg, float_round_to_zero);
1216 uint64_t helper_frip (uint64_t arg)
1218 return do_fri(arg, float_round_up);
1221 uint64_t helper_frim (uint64_t arg)
1223 return do_fri(arg, float_round_down);
1226 /* fmadd - fmadd. */
1227 uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1229 CPU_DoubleU farg1, farg2, farg3;
1231 farg1.ll = arg1;
1232 farg2.ll = arg2;
1233 farg3.ll = arg3;
1234 #if USE_PRECISE_EMULATION
1235 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1236 float64_is_signaling_nan(farg2.d) ||
1237 float64_is_signaling_nan(farg3.d))) {
1238 /* sNaN operation */
1239 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1240 } else {
1241 #ifdef FLOAT128
1242 /* This is the way the PowerPC specification defines it */
1243 float128 ft0_128, ft1_128;
1245 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1246 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1247 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1248 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1249 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1250 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1251 #else
1252 /* This is OK on x86 hosts */
1253 farg1.d = (farg1.d * farg2.d) + farg3.d;
1254 #endif
1256 #else
1257 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1258 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1259 #endif
1260 return farg1.ll;
1263 /* fmsub - fmsub. */
1264 uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1266 CPU_DoubleU farg1, farg2, farg3;
1268 farg1.ll = arg1;
1269 farg2.ll = arg2;
1270 farg3.ll = arg3;
1271 #if USE_PRECISE_EMULATION
1272 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1273 float64_is_signaling_nan(farg2.d) ||
1274 float64_is_signaling_nan(farg3.d))) {
1275 /* sNaN operation */
1276 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1277 } else {
1278 #ifdef FLOAT128
1279 /* This is the way the PowerPC specification defines it */
1280 float128 ft0_128, ft1_128;
1282 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1283 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1284 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1285 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1286 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1287 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1288 #else
1289 /* This is OK on x86 hosts */
1290 farg1.d = (farg1.d * farg2.d) - farg3.d;
1291 #endif
1293 #else
1294 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1295 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1296 #endif
1297 return farg1.ll;
1300 /* fnmadd - fnmadd. */
1301 uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1303 CPU_DoubleU farg1, farg2, farg3;
1305 farg1.ll = arg1;
1306 farg2.ll = arg2;
1307 farg3.ll = arg3;
1309 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1310 float64_is_signaling_nan(farg2.d) ||
1311 float64_is_signaling_nan(farg3.d))) {
1312 /* sNaN operation */
1313 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1314 } else {
1315 #if USE_PRECISE_EMULATION
1316 #ifdef FLOAT128
1317 /* This is the way the PowerPC specification defines it */
1318 float128 ft0_128, ft1_128;
1320 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1321 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1322 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1323 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1324 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1325         farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1326 #else
1327 /* This is OK on x86 hosts */
1328 farg1.d = (farg1.d * farg2.d) + farg3.d;
1329 #endif
1330 #else
1331 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1332 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1333 #endif
1334 if (likely(!isnan(farg1.d)))
1335 farg1.d = float64_chs(farg1.d);
1337 return farg1.ll;
1340 /* fnmsub - fnmsub. */
1341 uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1343 CPU_DoubleU farg1, farg2, farg3;
1345 farg1.ll = arg1;
1346 farg2.ll = arg2;
1347 farg3.ll = arg3;
1349 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1350 float64_is_signaling_nan(farg2.d) ||
1351 float64_is_signaling_nan(farg3.d))) {
1352 /* sNaN operation */
1353 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1354 } else {
1355 #if USE_PRECISE_EMULATION
1356 #ifdef FLOAT128
1357 /* This is the way the PowerPC specification defines it */
1358 float128 ft0_128, ft1_128;
1360 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1361 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1362 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1363 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1364 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1365 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1366 #else
1367 /* This is OK on x86 hosts */
1368 farg1.d = (farg1.d * farg2.d) - farg3.d;
1369 #endif
1370 #else
1371 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1372 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1373 #endif
1374 if (likely(!isnan(farg1.d)))
1375 farg1.d = float64_chs(farg1.d);
1377 return farg1.ll;
1380 /* frsp - frsp. */
1381 uint64_t helper_frsp (uint64_t arg)
1383 CPU_DoubleU farg;
1384 farg.ll = arg;
1386 #if USE_PRECISE_EMULATION
1387 if (unlikely(float64_is_signaling_nan(farg.d))) {
1388         /* sNaN round to single precision */
1389 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1390 } else {
1391         farg.d = float64_to_float32(farg.d, &env->fp_status);
1393 #else
1394 farg.d = float64_to_float32(farg.d, &env->fp_status);
1395 #endif
1396 return farg.ll;
1399 /* fsqrt - fsqrt. */
1400 uint64_t helper_fsqrt (uint64_t arg)
1402 CPU_DoubleU farg;
1403 farg.ll = arg;
1405 if (unlikely(float64_is_signaling_nan(farg.d))) {
1406 /* sNaN square root */
1407 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1408 } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
1409 /* Square root of a negative nonzero number */
1410 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1411 } else {
1412 farg.d = float64_sqrt(farg.d, &env->fp_status);
1414 return farg.ll;
1417 /* fre - fre. */
1418 uint64_t helper_fre (uint64_t arg)
1420 CPU_DoubleU farg;
1421 farg.ll = arg;
1423 if (unlikely(float64_is_signaling_nan(farg.d))) {
1424 /* sNaN reciprocal */
1425 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1426 } else if (unlikely(iszero(farg.d))) {
1427 /* Zero reciprocal */
1428 farg.ll = float_zero_divide_excp(1.0, farg.d);
1429 } else if (likely(isnormal(farg.d))) {
1430 farg.d = float64_div(1.0, farg.d, &env->fp_status);
1431 } else {
1432 if (farg.ll == 0x8000000000000000ULL) {
1433 farg.ll = 0xFFF0000000000000ULL;
1434 } else if (farg.ll == 0x0000000000000000ULL) {
1435 farg.ll = 0x7FF0000000000000ULL;
1436 } else if (isnan(farg.d)) {
1437 farg.ll = 0x7FF8000000000000ULL;
1438 } else if (fpisneg(farg.d)) {
1439 farg.ll = 0x8000000000000000ULL;
1440 } else {
1441 farg.ll = 0x0000000000000000ULL;
1444     return farg.ll;
1447 /* fres - fres. */
1448 uint64_t helper_fres (uint64_t arg)
1450 CPU_DoubleU farg;
1451 farg.ll = arg;
1453 if (unlikely(float64_is_signaling_nan(farg.d))) {
1454 /* sNaN reciprocal */
1455 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1456 } else if (unlikely(iszero(farg.d))) {
1457 /* Zero reciprocal */
1458 farg.ll = float_zero_divide_excp(1.0, farg.d);
1459 } else if (likely(isnormal(farg.d))) {
1460 #if USE_PRECISE_EMULATION
1461 farg.d = float64_div(1.0, farg.d, &env->fp_status);
1462 farg.d = float64_to_float32(farg.d, &env->fp_status);
1463 #else
1464 farg.d = float32_div(1.0, farg.d, &env->fp_status);
1465 #endif
1466 } else {
1467 if (farg.ll == 0x8000000000000000ULL) {
1468 farg.ll = 0xFFF0000000000000ULL;
1469 } else if (farg.ll == 0x0000000000000000ULL) {
1470 farg.ll = 0x7FF0000000000000ULL;
1471 } else if (isnan(farg.d)) {
1472 farg.ll = 0x7FF8000000000000ULL;
1473 } else if (fpisneg(farg.d)) {
1474 farg.ll = 0x8000000000000000ULL;
1475 } else {
1476 farg.ll = 0x0000000000000000ULL;
1479 return farg.ll;
1482 /* frsqrte - frsqrte. */
1483 uint64_t helper_frsqrte (uint64_t arg)
1485 CPU_DoubleU farg;
1486 farg.ll = arg;
1488 if (unlikely(float64_is_signaling_nan(farg.d))) {
1489 /* sNaN reciprocal square root */
1490 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1491 } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
1492 /* Reciprocal square root of a negative nonzero number */
1493 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1494 } else if (likely(isnormal(farg.d))) {
1495 farg.d = float64_sqrt(farg.d, &env->fp_status);
1496 farg.d = float32_div(1.0, farg.d, &env->fp_status);
1497 } else {
1498 if (farg.ll == 0x8000000000000000ULL) {
1499 farg.ll = 0xFFF0000000000000ULL;
1500 } else if (farg.ll == 0x0000000000000000ULL) {
1501 farg.ll = 0x7FF0000000000000ULL;
1502 } else if (isnan(farg.d)) {
1503 farg.ll |= 0x000FFFFFFFFFFFFFULL;
1504 } else if (fpisneg(farg.d)) {
1505 farg.ll = 0x7FF8000000000000ULL;
1506 } else {
1507 farg.ll = 0x0000000000000000ULL;
1510 return farg.ll;
1513 /* fsel - fsel. */
1514 uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1516 CPU_DoubleU farg1, farg2, farg3;
1518 farg1.ll = arg1;
1519 farg2.ll = arg2;
1520 farg3.ll = arg3;
1522 if (!fpisneg(farg1.d) || iszero(farg1.d))
1523 return farg2.ll;
1524 else
1525         return farg3.ll;
1528 uint32_t helper_fcmpu (uint64_t arg1, uint64_t arg2)
1530 CPU_DoubleU farg1, farg2;
1531 uint32_t ret = 0;
1532 farg1.ll = arg1;
1533 farg2.ll = arg2;
1535 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1536 float64_is_signaling_nan(farg2.d))) {
1537 /* sNaN comparison */
1538 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1539 } else {
1540 if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1541 ret = 0x08UL;
1542 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1543 ret = 0x04UL;
1544 } else {
1545 ret = 0x02UL;
1548 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1549 env->fpscr |= ret << FPSCR_FPRF;
1550 return ret;
1553 uint32_t helper_fcmpo (uint64_t arg1, uint64_t arg2)
1555 CPU_DoubleU farg1, farg2;
1556 uint32_t ret = 0;
1557 farg1.ll = arg1;
1558 farg2.ll = arg2;
1560 if (unlikely(float64_is_nan(farg1.d) ||
1561 float64_is_nan(farg2.d))) {
1562 if (float64_is_signaling_nan(farg1.d) ||
1563 float64_is_signaling_nan(farg2.d)) {
1564 /* sNaN comparison */
1565 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1566 POWERPC_EXCP_FP_VXVC);
1567 } else {
1568 /* qNaN comparison */
1569 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1571 } else {
1572 if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1573 ret = 0x08UL;
1574 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1575 ret = 0x04UL;
1576 } else {
1577 ret = 0x02UL;
1580 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1581 env->fpscr |= ret << FPSCR_FPRF;
1582 return ret;
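/* For both compare helpers the return value is the condition nibble also
 * written into FPSCR[FPRF]: 0x08 means "less than", 0x04 "greater than"
 * and 0x02 "equal"; the translator is expected to copy it into the
 * destination CR field.
 */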
1585 #if !defined (CONFIG_USER_ONLY)
1586 void cpu_dump_rfi (target_ulong RA, target_ulong msr);
1588 void do_store_msr (void)
1590 T0 = hreg_store_msr(env, T0, 0);
1591 if (T0 != 0) {
1592 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1593 raise_exception(env, T0);
1597 static always_inline void __do_rfi (target_ulong nip, target_ulong msr,
1598 target_ulong msrm, int keep_msrh)
1600 #if defined(TARGET_PPC64)
1601 if (msr & (1ULL << MSR_SF)) {
1602 nip = (uint64_t)nip;
1603 msr &= (uint64_t)msrm;
1604 } else {
1605 nip = (uint32_t)nip;
1606 msr = (uint32_t)(msr & msrm);
1607 if (keep_msrh)
1608 msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1610 #else
1611 nip = (uint32_t)nip;
1612 msr &= (uint32_t)msrm;
1613 #endif
1614 /* XXX: beware: this is false if VLE is supported */
1615 env->nip = nip & ~((target_ulong)0x00000003);
1616 hreg_store_msr(env, msr, 1);
1617 #if defined (DEBUG_OP)
1618 cpu_dump_rfi(env->nip, env->msr);
1619 #endif
1620 /* No need to raise an exception here,
1621 * as rfi is always the last insn of a TB
1623 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1626 void do_rfi (void)
1628 __do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1629 ~((target_ulong)0xFFFF0000), 1);
1632 #if defined(TARGET_PPC64)
1633 void do_rfid (void)
1635 __do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1636 ~((target_ulong)0xFFFF0000), 0);
1639 void do_hrfid (void)
1641 __do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1642 ~((target_ulong)0xFFFF0000), 0);
1644 #endif
1645 #endif
1647 void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
1649 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1650 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1651 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1652 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1653 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1654 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1658 #if defined(TARGET_PPC64)
1659 void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
1661 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1662 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1663 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1664 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1665 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
1666 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1668 #endif
1670 /*****************************************************************************/
1671 /* PowerPC 601 specific instructions (POWER bridge) */
1672 void do_POWER_abso (void)
1674 if ((int32_t)T0 == INT32_MIN) {
1675 T0 = INT32_MAX;
1676 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1677 } else if ((int32_t)T0 < 0) {
1678 T0 = -T0;
1679 env->xer &= ~(1 << XER_OV);
1680 } else {
1681 env->xer &= ~(1 << XER_OV);
1685 void do_POWER_clcs (void)
1687 switch (T0) {
1688 case 0x0CUL:
1689 /* Instruction cache line size */
1690 T0 = env->icache_line_size;
1691 break;
1692 case 0x0DUL:
1693 /* Data cache line size */
1694 T0 = env->dcache_line_size;
1695 break;
1696 case 0x0EUL:
1697 /* Minimum cache line size */
1698 T0 = env->icache_line_size < env->dcache_line_size ?
1699 env->icache_line_size : env->dcache_line_size;
1700 break;
1701 case 0x0FUL:
1702 /* Maximum cache line size */
1703 T0 = env->icache_line_size > env->dcache_line_size ?
1704 env->icache_line_size : env->dcache_line_size;
1705 break;
1706 default:
1707 /* Undefined */
1708 break;
1712 void do_POWER_div (void)
1714 uint64_t tmp;
1716 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1717 (int32_t)T1 == 0) {
1718 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1719 env->spr[SPR_MQ] = 0;
1720 } else {
1721 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1722 env->spr[SPR_MQ] = tmp % T1;
1723 T0 = tmp / (int32_t)T1;
1727 void do_POWER_divo (void)
1729 int64_t tmp;
1731 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1732 (int32_t)T1 == 0) {
1733 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1734 env->spr[SPR_MQ] = 0;
1735 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1736 } else {
1737 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1738 env->spr[SPR_MQ] = tmp % T1;
1739 tmp /= (int32_t)T1;
1740 if (tmp > (int64_t)INT32_MAX || tmp < (int64_t)INT32_MIN) {
1741 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1742 } else {
1743 env->xer &= ~(1 << XER_OV);
1745 T0 = tmp;
1749 void do_POWER_divs (void)
1751 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1752 (int32_t)T1 == 0) {
1753 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1754 env->spr[SPR_MQ] = 0;
1755 } else {
1756 env->spr[SPR_MQ] = T0 % T1;
1757 T0 = (int32_t)T0 / (int32_t)T1;
1761 void do_POWER_divso (void)
1763 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1764 (int32_t)T1 == 0) {
1765 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1766 env->spr[SPR_MQ] = 0;
1767 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1768 } else {
1769 T0 = (int32_t)T0 / (int32_t)T1;
1770 env->spr[SPR_MQ] = (int32_t)T0 % (int32_t)T1;
1771 env->xer &= ~(1 << XER_OV);
1775 void do_POWER_dozo (void)
1777 if ((int32_t)T1 > (int32_t)T0) {
1778 T2 = T0;
1779 T0 = T1 - T0;
1780 if (((uint32_t)(~T2) ^ (uint32_t)T1 ^ UINT32_MAX) &
1781 ((uint32_t)(~T2) ^ (uint32_t)T0) & (1UL << 31)) {
1782 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1783 } else {
1784 env->xer &= ~(1 << XER_OV);
1786 } else {
1787 T0 = 0;
1788 env->xer &= ~(1 << XER_OV);
1792 void do_POWER_maskg (void)
1794 uint32_t ret;
1796 if ((uint32_t)T0 == (uint32_t)(T1 + 1)) {
1797 ret = UINT32_MAX;
1798 } else {
1799 ret = (UINT32_MAX >> ((uint32_t)T0)) ^
1800 ((UINT32_MAX >> ((uint32_t)T1)) >> 1);
1801 if ((uint32_t)T0 > (uint32_t)T1)
1802 ret = ~ret;
1804 T0 = ret;
1807 void do_POWER_mulo (void)
1809 uint64_t tmp;
1811 tmp = (uint64_t)T0 * (uint64_t)T1;
1812 env->spr[SPR_MQ] = tmp >> 32;
1813 T0 = tmp;
1814 if (tmp >> 32 != ((uint64_t)T0 >> 16) * ((uint64_t)T1 >> 16)) {
1815 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1816 } else {
1817 env->xer &= ~(1 << XER_OV);
1821 #if !defined (CONFIG_USER_ONLY)
1822 void do_POWER_rac (void)
1824 mmu_ctx_t ctx;
1825 int nb_BATs;
1827 /* We don't have to generate many instances of this instruction,
1828 * as rac is supervisor only.
1830 /* XXX: FIX THIS: Pretend we have no BAT */
1831 nb_BATs = env->nb_BATs;
1832 env->nb_BATs = 0;
1833 if (get_physical_address(env, &ctx, T0, 0, ACCESS_INT) == 0)
1834 T0 = ctx.raddr;
1835 env->nb_BATs = nb_BATs;
1838 void do_POWER_rfsvc (void)
1840 __do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1843 void do_store_hid0_601 (void)
1845 uint32_t hid0;
1847 hid0 = env->spr[SPR_HID0];
1848 if ((T0 ^ hid0) & 0x00000008) {
1849 /* Change current endianness */
1850 env->hflags &= ~(1 << MSR_LE);
1851 env->hflags_nmsr &= ~(1 << MSR_LE);
1852 env->hflags_nmsr |= (1 << MSR_LE) & (((T0 >> 3) & 1) << MSR_LE);
1853 env->hflags |= env->hflags_nmsr;
1854 if (loglevel != 0) {
1855 fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
1856 __func__, T0 & 0x8 ? 'l' : 'b', env->hflags);
1859 env->spr[SPR_HID0] = T0;
1861 #endif
1863 /*****************************************************************************/
1864 /* 602 specific instructions */
1865 /* mfrom is the most crazy instruction ever seen, imho ! */
1866 /* Real implementation uses a ROM table. Do the same */
1867 #define USE_MFROM_ROM_TABLE
1868 target_ulong helper_602_mfrom (target_ulong arg)
1870 if (likely(arg < 602)) {
1871 #if defined(USE_MFROM_ROM_TABLE)
1872 #include "mfrom_table.c"
1873         return mfrom_ROM_table[arg];
1874 #else
1875 double d;
1876         /* Extremely decomposed:
1877          *
1878          * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
1880 d = arg;
1881 d = float64_div(d, 256, &env->fp_status);
1882 d = float64_chs(d);
1883 d = exp10(d); // XXX: use float emulation function
1884 d = float64_add(d, 1.0, &env->fp_status);
1885 d = log10(d); // XXX: use float emulation function
1886 d = float64_mul(d, 256, &env->fp_status);
1887 d = float64_add(d, 0.5, &env->fp_status);
1888 return float64_round_to_int(d, &env->fp_status);
1889 #endif
1890 } else {
1891 return 0;
1895 /*****************************************************************************/
1896 /* Embedded PowerPC specific helpers */
1898 /* XXX: to be improved to check access rights when in user-mode */
1899 void do_load_dcr (void)
1901 target_ulong val;
1903 if (unlikely(env->dcr_env == NULL)) {
1904 if (loglevel != 0) {
1905 fprintf(logfile, "No DCR environment\n");
1907 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1908 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1909 } else if (unlikely(ppc_dcr_read(env->dcr_env, T0, &val) != 0)) {
1910 if (loglevel != 0) {
1911 fprintf(logfile, "DCR read error %d %03x\n", (int)T0, (int)T0);
1913 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1914 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1915 } else {
1916 T0 = val;
1920 void do_store_dcr (void)
1922 if (unlikely(env->dcr_env == NULL)) {
1923 if (loglevel != 0) {
1924 fprintf(logfile, "No DCR environment\n");
1926 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1927 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1928 } else if (unlikely(ppc_dcr_write(env->dcr_env, T0, T1) != 0)) {
1929 if (loglevel != 0) {
1930 fprintf(logfile, "DCR write error %d %03x\n", (int)T0, (int)T0);
1932 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1933 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1937 #if !defined(CONFIG_USER_ONLY)
1938 void do_40x_rfci (void)
1940 __do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1941 ~((target_ulong)0xFFFF0000), 0);
1944 void do_rfci (void)
1946 __do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1947 ~((target_ulong)0x3FFF0000), 0);
1950 void do_rfdi (void)
1952 __do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1953 ~((target_ulong)0x3FFF0000), 0);
1956 void do_rfmci (void)
1958 __do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1959 ~((target_ulong)0x3FFF0000), 0);
1962 void do_load_403_pb (int num)
1964 T0 = env->pb[num];
1967 void do_store_403_pb (int num)
1969 if (likely(env->pb[num] != T0)) {
1970 env->pb[num] = T0;
1971 /* Should be optimized */
1972 tlb_flush(env, 1);
1975 #endif
1977 /* 440 specific */
1978 void do_440_dlmzb (void)
1980 target_ulong mask;
1981 int i;
1983 i = 1;
1984 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1985 if ((T0 & mask) == 0)
1986 goto done;
1987 i++;
1989 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1990 if ((T1 & mask) == 0)
1991 break;
1992 i++;
1994 done:
1995 T0 = i;
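/* E.g. with T0 = 0x41420043 ("AB\0C") the zero byte is the third one of
 * the high word, so T0 is set to 3; the second loop only runs when T0
 * contains no zero byte at all.
 */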
1998 /*****************************************************************************/
1999 /* SPE extension helpers */
2000 /* Use a table to make this quicker */
2001 static uint8_t hbrev[16] = {
2002 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
2003 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
2006 static always_inline uint8_t byte_reverse (uint8_t val)
2008 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
2011 static always_inline uint32_t word_reverse (uint32_t val)
2013 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
2014 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
2017 #define MASKBITS 16 // Random value - to be fixed (implementation dependent)
2018 target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
2020 uint32_t a, b, d, mask;
2022 mask = UINT32_MAX >> (32 - MASKBITS);
2023 a = arg1 & mask;
2024 b = arg2 & mask;
2025 d = word_reverse(1 + word_reverse(a | ~b));
2026 return (arg1 & ~mask) | (d & b);
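/* brinc performs a bit-reversed increment of the index bits selected by
 * arg2, as typically used for FFT-style addressing: e.g. with arg2 = 0xF
 * the sequence starting at arg1 = 0 runs 0x0, 0x8, 0x4, 0xC, 0x2, ...
 * (each call returns the next bit-reversed counter value).
 */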
2029 uint32_t helper_cntlsw32 (uint32_t val)
2031 if (val & 0x80000000)
2032 return clz32(~val);
2033 else
2034 return clz32(val);
2037 uint32_t helper_cntlzw32 (uint32_t val)
2039 return clz32(val);
2042 /* Single-precision floating-point conversions */
2043 static always_inline uint32_t efscfsi (uint32_t val)
2045 CPU_FloatU u;
2047 u.f = int32_to_float32(val, &env->spe_status);
2049 return u.l;
2052 static always_inline uint32_t efscfui (uint32_t val)
2054 CPU_FloatU u;
2056 u.f = uint32_to_float32(val, &env->spe_status);
2058 return u.l;
2061 static always_inline int32_t efsctsi (uint32_t val)
2063 CPU_FloatU u;
2065 u.l = val;
2066     /* NaNs are not treated the way IEEE 754 specifies */
2067 if (unlikely(isnan(u.f)))
2068 return 0;
2070 return float32_to_int32(u.f, &env->spe_status);
2073 static always_inline uint32_t efsctui (uint32_t val)
2075 CPU_FloatU u;
2077 u.l = val;
2078     /* NaNs are not treated the way IEEE 754 specifies */
2079 if (unlikely(isnan(u.f)))
2080 return 0;
2082 return float32_to_uint32(u.f, &env->spe_status);
2085 static always_inline uint32_t efsctsiz (uint32_t val)
2087 CPU_FloatU u;
2089 u.l = val;
2090     /* NaNs are not treated the way IEEE 754 specifies */
2091 if (unlikely(isnan(u.f)))
2092 return 0;
2094 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
2097 static always_inline uint32_t efsctuiz (uint32_t val)
2099 CPU_FloatU u;
2101 u.l = val;
2102     /* NaNs are not treated the way IEEE 754 specifies */
2103 if (unlikely(isnan(u.f)))
2104 return 0;
2106 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
2109 static always_inline uint32_t efscfsf (uint32_t val)
2111 CPU_FloatU u;
2112 float32 tmp;
2114 u.f = int32_to_float32(val, &env->spe_status);
2115 tmp = int64_to_float32(1ULL << 32, &env->spe_status);
2116 u.f = float32_div(u.f, tmp, &env->spe_status);
2118 return u.l;
2121 static always_inline uint32_t efscfuf (uint32_t val)
2123 CPU_FloatU u;
2124 float32 tmp;
2126 u.f = uint32_to_float32(val, &env->spe_status);
2127 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2128 u.f = float32_div(u.f, tmp, &env->spe_status);
2130 return u.l;
2133 static always_inline uint32_t efsctsf (uint32_t val)
2135 CPU_FloatU u;
2136 float32 tmp;
2138 u.l = val;
2139     /* NaNs are not treated the way IEEE 754 specifies */
2140 if (unlikely(isnan(u.f)))
2141 return 0;
2142 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2143 u.f = float32_mul(u.f, tmp, &env->spe_status);
2145 return float32_to_int32(u.f, &env->spe_status);
2148 static always_inline uint32_t efsctuf (uint32_t val)
2150 CPU_FloatU u;
2151 float32 tmp;
2153 u.l = val;
2154     /* NaNs are not treated the way IEEE 754 specifies */
2155 if (unlikely(isnan(u.f)))
2156 return 0;
2157 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2158 u.f = float32_mul(u.f, tmp, &env->spe_status);
2160 return float32_to_uint32(u.f, &env->spe_status);
2163 #define HELPER_SPE_SINGLE_CONV(name) \
2164 uint32_t helper_e##name (uint32_t val) \
2166 return e##name(val); \
2168 /* efscfsi */
2169 HELPER_SPE_SINGLE_CONV(fscfsi);
2170 /* efscfui */
2171 HELPER_SPE_SINGLE_CONV(fscfui);
2172 /* efscfuf */
2173 HELPER_SPE_SINGLE_CONV(fscfuf);
2174 /* efscfsf */
2175 HELPER_SPE_SINGLE_CONV(fscfsf);
2176 /* efsctsi */
2177 HELPER_SPE_SINGLE_CONV(fsctsi);
2178 /* efsctui */
2179 HELPER_SPE_SINGLE_CONV(fsctui);
2180 /* efsctsiz */
2181 HELPER_SPE_SINGLE_CONV(fsctsiz);
2182 /* efsctuiz */
2183 HELPER_SPE_SINGLE_CONV(fsctuiz);
2184 /* efsctsf */
2185 HELPER_SPE_SINGLE_CONV(fsctsf);
2186 /* efsctuf */
2187 HELPER_SPE_SINGLE_CONV(fsctuf);
#define HELPER_SPE_VECTOR_CONV(name)                                          \
uint64_t helper_ev##name (uint64_t val)                                       \
{                                                                             \
    return ((uint64_t)e##name(val >> 32) << 32) |                             \
           (uint64_t)e##name(val);                                            \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
/* Single-precision floating-point arithmetic */
static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                         \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);
#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
           (uint64_t)e##name(op1, op2);                                       \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
/* Single-precision floating-point comparisons */
static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0;
}

static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4;
}

static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->spe_status) ? 4 : 0;
}
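/* The efstst* forms above return the raw softfloat comparison result.  The
 * efscmp* variants are architecturally expected to also account for special
 * values (NaN, infinities); for now they simply alias the test forms, as the
 * XXX markers below record.
 */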
static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststlt(op1, op2);
}

static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststgt(op1, op2);
}

static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststeq(op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                           \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2) << 2;                                            \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
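/* evcmp_merge() folds the two per-element results into a single CR field
 * image: the high element goes to bit 3, the low element to bit 2, and the
 * "either" / "both" summaries to bits 1 and 0.
 */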
static always_inline uint32_t evcmp_merge (int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

#define HELPER_VECTOR_SPE_CMP(name)                                           \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
/* Double-precision floating-point conversion */
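/* The efd* helpers below work on 64-bit operands through CPU_DoubleU and are
 * exported directly; unlike the single-precision ops there is no vector form
 * to generate, so no macro layer is needed.
 */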
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->spe_status);

    return u.ll;
}

uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->spe_status);

    return u.ll;
}

uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->spe_status);

    return u.ll;
}

uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->spe_status);

    return u.ll;
}

uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN inputs are not handled the way IEEE 754 specifies: return 0 */
    if (unlikely(isnan(u.d)))
        return 0;

    return float64_to_int32(u.d, &env->spe_status);
}

uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN inputs are not handled the way IEEE 754 specifies: return 0 */
    if (unlikely(isnan(u.d)))
        return 0;

    return float64_to_uint32(u.d, &env->spe_status);
}

uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN inputs are not handled the way IEEE 754 specifies: return 0 */
    if (unlikely(isnan(u.d)))
        return 0;

    return float64_to_int32_round_to_zero(u.d, &env->spe_status);
}

uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN inputs are not handled the way IEEE 754 specifies: return 0 */
    if (unlikely(isnan(u.d)))
        return 0;

    return float64_to_int64_round_to_zero(u.d, &env->spe_status);
}

uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN inputs are not handled the way IEEE 754 specifies: return 0 */
    if (unlikely(isnan(u.d)))
        return 0;

    return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
}

uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN inputs are not handled the way IEEE 754 specifies: return 0 */
    if (unlikely(isnan(u.d)))
        return 0;

    return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
}
uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->spe_status);
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_div(u.d, tmp, &env->spe_status);

    return u.ll;
}

uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->spe_status);
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_div(u.d, tmp, &env->spe_status);

    return u.ll;
}

uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN inputs are not handled the way IEEE 754 specifies: return 0 */
    if (unlikely(isnan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_mul(u.d, tmp, &env->spe_status);

    return float64_to_int32(u.d, &env->spe_status);
}

uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN inputs are not handled the way IEEE 754 specifies: return 0 */
    if (unlikely(isnan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_mul(u.d, tmp, &env->spe_status);

    return float64_to_uint32(u.d, &env->spe_status);
}
uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->spe_status);

    return u2.l;
}

uint64_t helper_efdcfs (uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->spe_status);

    return u2.ll;
}
/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}
/* Double-precision floating-point comparisons */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0;
}

uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4;
}

uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0;
}

uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(op1, op2);
}

uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(op1, op2);
}

uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(op1, op2);
}
/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
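/* Instantiate the out-of-line MMU load/store accessors for each access size
 * (1 << SHIFT bytes).  softmmu_template.h generates the *_mmu entry points
 * (e.g. __ldl_mmu / __stl_mmu for SHIFT 2) that generated code falls back to
 * on a TLB miss.
 */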
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
/* Try to fill the TLB and raise an exception on error.  If retaddr is NULL,
   the function was called from C code (i.e. not from generated code or from
   helper.c). */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env, env->exception_index, env->error_code);
    }
    env = saved_env;
}
/* Software driven TLB management */
/* PowerPC 602/603 software TLB load instructions helpers */
static void helper_load_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
                " PTE1 " ADDRX " way %d\n",
                __func__, new_EPN, EPN, CMP, RPN, way);
    }
#endif
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_load_6xx_tlbd (target_ulong EPN)
{
    helper_load_6xx_tlb(EPN, 0);
}

void helper_load_6xx_tlbi (target_ulong EPN)
{
    helper_load_6xx_tlb(EPN, 1);
}
/* PowerPC 74xx software TLB load instructions helpers */
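/* The 74xx variant gets the miss information from the PTEHI/PTELO/TLBMISS
 * SPRs rather than the 602/603 ICMP/DCMP and IMISS/DMISS registers, but it
 * feeds the same ppc6xx_tlb_store() software TLB array.
 */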
static void helper_load_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
                " PTE1 " ADDRX " way %d\n",
                __func__, new_EPN, EPN, CMP, RPN, way);
    }
#endif
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_load_74xx_tlbd (target_ulong EPN)
{
    helper_load_74xx_tlb(EPN, 0);
}

void helper_load_74xx_tlbi (target_ulong EPN)
{
    helper_load_74xx_tlb(EPN, 1);
}
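/* BookE encodes a TLB entry's page size as a 4-bit field: the page size is
 * 1 KiB << (2 * field), so e.g. field 2 means 16 KiB and field 9 means
 * 256 MiB.  booke_page_size_to_tlb() is the inverse mapping and returns -1
 * for sizes that have no encoding.
 */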
static always_inline target_ulong booke_tlb_to_page_size (int size)
{
    return 1024 << (2 * size);
}

static always_inline int booke_page_size_to_tlb (target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}
/* Helpers for 4xx TLB management */
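/* Note that the 4xx and 440 TLB helpers below still work on the dyngen-era
 * T0/T1 globals (TLB entry index in T0, entry word in T1); they have not yet
 * been converted to TCG helpers taking explicit arguments.
 */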
void do_4xx_tlbre_lo (void)
{
    ppcemb_tlb_t *tlb;
    int size;

    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    T0 = tlb->EPN;
    if (tlb->prot & PAGE_VALID)
        T0 |= 0x400;
    size = booke_page_size_to_tlb(tlb->size);
    if (size < 0 || size > 0x7)
        size = 1;
    T0 |= size << 7;
    env->spr[SPR_40x_PID] = tlb->PID;
}

void do_4xx_tlbre_hi (void)
{
    ppcemb_tlb_t *tlb;

    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    T0 = tlb->RPN;
    if (tlb->prot & PAGE_EXEC)
        T0 |= 0x200;
    if (tlb->prot & PAGE_WRITE)
        T0 |= 0x100;
}
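/* do_4xx_tlbwe_hi() decodes the high TLB entry word held in T1: the page
 * size field in bits 7..9, the valid bit (0x40) and the little-endian
 * attribute (0x20, unsupported), and it flushes any pages covered by the
 * old and the new mapping.
 */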
void do_4xx_tlbwe_hi (void)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s T0 " TDX " T1 " TDX "\n", __func__, T0, T1);
    }
#endif
    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
#if defined (DEBUG_SOFTWARE_TLB)
        if (loglevel != 0) {
            fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
                    " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
        }
#endif
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
    tlb->size = booke_tlb_to_page_size((T1 >> 7) & 0x7);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((T1 & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "is not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((T1 >> 7) & 0x7));
    }
    tlb->EPN = T1 & ~(tlb->size - 1);
    if (T1 & 0x40)
        tlb->prot |= PAGE_VALID;
    else
        tlb->prot &= ~PAGE_VALID;
    if (T1 & 0x20) {
        /* XXX: TO BE FIXED */
        cpu_abort(env, "Little-endian TLB entries are not supported for now\n");
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    tlb->attr = T1 & 0xFF;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)T0, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    }
#endif
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
#if defined (DEBUG_SOFTWARE_TLB)
        if (loglevel != 0) {
            fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
                    " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
        }
#endif
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
}
void do_4xx_tlbwe_lo (void)
{
    ppcemb_tlb_t *tlb;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s T0 " TDX " T1 " TDX "\n", __func__, T0, T1);
    }
#endif
    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    tlb->RPN = T1 & 0xFFFFFC00;
    tlb->prot = PAGE_READ;
    if (T1 & 0x200)
        tlb->prot |= PAGE_EXEC;
    if (T1 & 0x100)
        tlb->prot |= PAGE_WRITE;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)T0, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    }
#endif
}
/* PowerPC 440 TLB management */
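/* The 440 tlbwe/tlbre instructions address a TLB entry as three words:
 * word 0 carries the EPN, page size and valid bit, word 1 the RPN, and
 * word 2 the storage attributes plus the user/supervisor access
 * permissions, as decoded below.
 */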
void do_440_tlbwe (int word)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s word %d T0 " TDX " T1 " TDX "\n",
                __func__, word, T0, T1);
    }
#endif
    do_flush_tlbs = 0;
    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = T1 & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((T1 >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (T1 >> 8) & 1;
        if (T1 & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = T1 & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (T1 & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (T1 & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (T1 & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (T1 & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (T1 & 0x8)
            tlb->prot |= PAGE_READ;
        if (T1 & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (T1 & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}
void do_440_tlbre (int word)
{
    ppcemb_tlb_t *tlb;
    int size;

    T0 &= 0x3F;
    tlb = &env->tlb[T0].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        T0 = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;
        T0 |= size << 4;
        if (tlb->attr & 0x1)
            T0 |= 0x100;
        if (tlb->prot & PAGE_VALID)
            T0 |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        T0 = tlb->RPN;
        break;
    case 2:
        T0 = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            T0 |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            T0 |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            T0 |= 0x4;
        if (tlb->prot & PAGE_READ)
            T0 |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            T0 |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            T0 |= 0x20;
        break;
    }
}

#endif /* !CONFIG_USER_ONLY */