Better solution for the alignment problem
[qemu/mini2440.git] / target-ppc / op_helper.c
blob 544d9066640794019f193a80b3a6677dddda9a71
1 /*
2 * PowerPC emulation helpers for qemu.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "exec.h"
21 #include "host-utils.h"
23 #include "helper_regs.h"
24 #include "op_helper.h"
26 #define MEMSUFFIX _raw
27 #include "op_helper.h"
28 #include "op_helper_mem.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #define MEMSUFFIX _user
31 #include "op_helper.h"
32 #include "op_helper_mem.h"
33 #define MEMSUFFIX _kernel
34 #include "op_helper.h"
35 #include "op_helper_mem.h"
36 #define MEMSUFFIX _hypv
37 #include "op_helper.h"
38 #include "op_helper_mem.h"
39 #endif
41 //#define DEBUG_OP
42 //#define DEBUG_EXCEPTIONS
43 //#define DEBUG_SOFTWARE_TLB
45 /*****************************************************************************/
46 /* Exceptions processing helpers */
48 void do_raise_exception_err (uint32_t exception, int error_code)
50 #if 0
51 printf("Raise exception %3x code : %d\n", exception, error_code);
52 #endif
53 env->exception_index = exception;
54 env->error_code = error_code;
55 cpu_loop_exit();
58 void do_raise_exception (uint32_t exception)
60 do_raise_exception_err(exception, 0);
63 void cpu_dump_EA (target_ulong EA);
64 void do_print_mem_EA (target_ulong EA)
66 cpu_dump_EA(EA);
69 /*****************************************************************************/
70 /* Registers load and stores */
71 void do_load_cr (void)
73 T0 = (env->crf[0] << 28) |
74 (env->crf[1] << 24) |
75 (env->crf[2] << 20) |
76 (env->crf[3] << 16) |
77 (env->crf[4] << 12) |
78 (env->crf[5] << 8) |
79 (env->crf[6] << 4) |
80 (env->crf[7] << 0);
83 void do_store_cr (uint32_t mask)
85 int i, sh;
87 for (i = 0, sh = 7; i < 8; i++, sh--) {
88 if (mask & (1 << sh))
89 env->crf[i] = (T0 >> (sh * 4)) & 0xFUL;
93 #if defined(TARGET_PPC64)
94 void do_store_pri (int prio)
96 env->spr[SPR_PPR] &= ~0x001C000000000000ULL;
97 env->spr[SPR_PPR] |= ((uint64_t)prio & 0x7) << 50;
99 #endif
101 target_ulong ppc_load_dump_spr (int sprn)
103 if (loglevel != 0) {
104 fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
105 sprn, sprn, env->spr[sprn]);
108 return env->spr[sprn];
111 void ppc_store_dump_spr (int sprn, target_ulong val)
113 if (loglevel != 0) {
114 fprintf(logfile, "Write SPR %d %03x => " ADDRX " <= " ADDRX "\n",
115 sprn, sprn, env->spr[sprn], val);
117 env->spr[sprn] = val;
120 /*****************************************************************************/
121 /* Fixed point operations helpers */
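/* adde: T0 = T0 + T1 + XER[CA]; XER[CA] is recomputed from the unsigned
 * carry out of the addition. */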
122 void do_adde (void)
124 T2 = T0;
125 T0 += T1 + xer_ca;
126 if (likely(!((uint32_t)T0 < (uint32_t)T2 ||
127 (xer_ca == 1 && (uint32_t)T0 == (uint32_t)T2)))) {
128 xer_ca = 0;
129 } else {
130 xer_ca = 1;
134 #if defined(TARGET_PPC64)
135 void do_adde_64 (void)
137 T2 = T0;
138 T0 += T1 + xer_ca;
139 if (likely(!((uint64_t)T0 < (uint64_t)T2 ||
140 (xer_ca == 1 && (uint64_t)T0 == (uint64_t)T2)))) {
141 xer_ca = 0;
142 } else {
143 xer_ca = 1;
146 #endif
148 void do_addmeo (void)
150 T1 = T0;
151 T0 += xer_ca + (-1);
152 xer_ov = ((uint32_t)T1 & ((uint32_t)T1 ^ (uint32_t)T0)) >> 31;
153 xer_so |= xer_ov;
154 if (likely(T1 != 0))
155 xer_ca = 1;
156 else
157 xer_ca = 0;
160 #if defined(TARGET_PPC64)
161 void do_addmeo_64 (void)
163 T1 = T0;
164 T0 += xer_ca + (-1);
165 xer_ov = ((uint64_t)T1 & ((uint64_t)T1 ^ (uint64_t)T0)) >> 63;
166 xer_so |= xer_ov;
167 if (likely(T1 != 0))
168 xer_ca = 1;
169 else
170 xer_ca = 0;
172 #endif
174 void do_divwo (void)
176 if (likely(!(((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
177 (int32_t)T1 == 0))) {
178 xer_ov = 0;
179 T0 = (int32_t)T0 / (int32_t)T1;
180 } else {
181 xer_ov = 1;
182 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
184 xer_so |= xer_ov;
187 #if defined(TARGET_PPC64)
188 void do_divdo (void)
190 if (likely(!(((int64_t)T0 == INT64_MIN && (int64_t)T1 == (int64_t)-1LL) ||
191 (int64_t)T1 == 0))) {
192 xer_ov = 0;
193 T0 = (int64_t)T0 / (int64_t)T1;
194 } else {
195 xer_ov = 1;
196 T0 = UINT64_MAX * ((uint64_t)T0 >> 63);
198 xer_so |= xer_ov;
200 #endif
202 void do_divwuo (void)
204 if (likely((uint32_t)T1 != 0)) {
205 xer_ov = 0;
206 T0 = (uint32_t)T0 / (uint32_t)T1;
207 } else {
208 xer_ov = 1;
209 xer_so = 1;
210 T0 = 0;
214 #if defined(TARGET_PPC64)
215 void do_divduo (void)
217 if (likely((uint64_t)T1 != 0)) {
218 xer_ov = 0;
219 T0 = (uint64_t)T0 / (uint64_t)T1;
220 } else {
221 xer_ov = 1;
222 xer_so = 1;
223 T0 = 0;
226 #endif
228 void do_mullwo (void)
230 int64_t res = (int64_t)T0 * (int64_t)T1;
232 if (likely((int32_t)res == res)) {
233 xer_ov = 0;
234 } else {
235 xer_ov = 1;
236 xer_so = 1;
238 T0 = (int32_t)res;
241 #if defined(TARGET_PPC64)
242 void do_mulldo (void)
244 int64_t th;
245 uint64_t tl;
247 muls64(&tl, &th, T0, T1);
248 T0 = (int64_t)tl;
249 /* If th != 0 && th != -1, then we had an overflow */
250 if (likely((uint64_t)(th + 1) <= 1)) {
251 xer_ov = 0;
252 } else {
253 xer_ov = 1;
255 xer_so |= xer_ov;
257 #endif
259 void do_nego (void)
261 if (likely((int32_t)T0 != INT32_MIN)) {
262 xer_ov = 0;
263 T0 = -(int32_t)T0;
264 } else {
265 xer_ov = 1;
266 xer_so = 1;
270 #if defined(TARGET_PPC64)
271 void do_nego_64 (void)
273 if (likely((int64_t)T0 != INT64_MIN)) {
274 xer_ov = 0;
275 T0 = -(int64_t)T0;
276 } else {
277 xer_ov = 1;
278 xer_so = 1;
281 #endif
283 void do_subfe (void)
285 T0 = T1 + ~T0 + xer_ca;
286 if (likely((uint32_t)T0 >= (uint32_t)T1 &&
287 (xer_ca == 0 || (uint32_t)T0 != (uint32_t)T1))) {
288 xer_ca = 0;
289 } else {
290 xer_ca = 1;
294 #if defined(TARGET_PPC64)
295 void do_subfe_64 (void)
297 T0 = T1 + ~T0 + xer_ca;
298 if (likely((uint64_t)T0 >= (uint64_t)T1 &&
299 (xer_ca == 0 || (uint64_t)T0 != (uint64_t)T1))) {
300 xer_ca = 0;
301 } else {
302 xer_ca = 1;
305 #endif
307 void do_subfmeo (void)
309 T1 = T0;
310 T0 = ~T0 + xer_ca - 1;
311 xer_ov = ((uint32_t)~T1 & ((uint32_t)~T1 ^ (uint32_t)T0)) >> 31;
312 xer_so |= xer_ov;
313 if (likely((uint32_t)T1 != UINT32_MAX))
314 xer_ca = 1;
315 else
316 xer_ca = 0;
319 #if defined(TARGET_PPC64)
320 void do_subfmeo_64 (void)
322 T1 = T0;
323 T0 = ~T0 + xer_ca - 1;
324 xer_ov = ((uint64_t)~T1 & ((uint64_t)~T1 ^ (uint64_t)T0)) >> 63;
325 xer_so |= xer_ov;
326 if (likely((uint64_t)T1 != UINT64_MAX))
327 xer_ca = 1;
328 else
329 xer_ca = 0;
331 #endif
333 void do_subfzeo (void)
335 T1 = T0;
336 T0 = ~T0 + xer_ca;
337 xer_ov = (((uint32_t)~T1 ^ UINT32_MAX) &
338 ((uint32_t)(~T1) ^ (uint32_t)T0)) >> 31;
339 xer_so |= xer_ov;
340 if (likely((uint32_t)T0 >= (uint32_t)~T1)) {
341 xer_ca = 0;
342 } else {
343 xer_ca = 1;
347 #if defined(TARGET_PPC64)
348 void do_subfzeo_64 (void)
350 T1 = T0;
351 T0 = ~T0 + xer_ca;
352 xer_ov = (((uint64_t)~T1 ^ UINT64_MAX) &
353 ((uint64_t)(~T1) ^ (uint64_t)T0)) >> 63;
354 xer_so |= xer_ov;
355 if (likely((uint64_t)T0 >= (uint64_t)~T1)) {
356 xer_ca = 0;
357 } else {
358 xer_ca = 1;
361 #endif
363 void do_cntlzw (void)
365 T0 = clz32(T0);
368 #if defined(TARGET_PPC64)
369 void do_cntlzd (void)
371 T0 = clz64(T0);
373 #endif
375 /* shift right arithmetic helper */
376 void do_sraw (void)
378 int32_t ret;
380 if (likely(!(T1 & 0x20UL))) {
381 if (likely((uint32_t)T1 != 0)) {
382 ret = (int32_t)T0 >> (T1 & 0x1fUL);
383 if (likely(ret >= 0 || ((int32_t)T0 & ((1 << T1) - 1)) == 0)) {
384 xer_ca = 0;
385 } else {
386 xer_ca = 1;
388 } else {
389 ret = T0;
390 xer_ca = 0;
392 } else {
393 ret = UINT32_MAX * ((uint32_t)T0 >> 31);
394 if (likely(ret >= 0 || ((uint32_t)T0 & ~0x80000000UL) == 0)) {
395 xer_ca = 0;
396 } else {
397 xer_ca = 1;
400 T0 = ret;
403 #if defined(TARGET_PPC64)
404 void do_srad (void)
406 int64_t ret;
408 if (likely(!(T1 & 0x40UL))) {
409 if (likely((uint64_t)T1 != 0)) {
410 ret = (int64_t)T0 >> (T1 & 0x3FUL);
 411             if (likely(ret >= 0 || ((int64_t)T0 & ((1ULL << T1) - 1)) == 0)) {
412 xer_ca = 0;
413 } else {
414 xer_ca = 1;
416 } else {
417 ret = T0;
418 xer_ca = 0;
420 } else {
421 ret = UINT64_MAX * ((uint64_t)T0 >> 63);
422 if (likely(ret >= 0 || ((uint64_t)T0 & ~0x8000000000000000ULL) == 0)) {
423 xer_ca = 0;
424 } else {
425 xer_ca = 1;
428 T0 = ret;
430 #endif
432 void do_popcntb (void)
434 uint32_t ret;
435 int i;
437 ret = 0;
438 for (i = 0; i < 32; i += 8)
439 ret |= ctpop8((T0 >> i) & 0xFF) << i;
440 T0 = ret;
443 #if defined(TARGET_PPC64)
444 void do_popcntb_64 (void)
446 uint64_t ret;
447 int i;
449 ret = 0;
450 for (i = 0; i < 64; i += 8)
451 ret |= ctpop8((T0 >> i) & 0xFF) << i;
452 T0 = ret;
454 #endif
456 /*****************************************************************************/
457 /* Floating point operations helpers */
458 static always_inline int fpisneg (float64 d)
460 CPU_DoubleU u;
462 u.d = d;
464 return u.ll >> 63 != 0;
467 static always_inline int isden (float64 d)
469 CPU_DoubleU u;
471 u.d = d;
473 return ((u.ll >> 52) & 0x7FF) == 0;
476 static always_inline int iszero (float64 d)
478 CPU_DoubleU u;
480 u.d = d;
482 return (u.ll & ~0x8000000000000000ULL) == 0;
485 static always_inline int isinfinity (float64 d)
487 CPU_DoubleU u;
489 u.d = d;
491 return ((u.ll >> 52) & 0x7FF) == 0x7FF &&
492 (u.ll & 0x000FFFFFFFFFFFFFULL) == 0;
495 #ifdef CONFIG_SOFTFLOAT
496 static always_inline int isfinite (float64 d)
498 CPU_DoubleU u;
500 u.d = d;
502 return (((u.ll >> 52) & 0x7FF) != 0x7FF);
505 static always_inline int isnormal (float64 d)
507 CPU_DoubleU u;
509 u.d = d;
511 uint32_t exp = (u.ll >> 52) & 0x7FF;
512 return ((0 < exp) && (exp < 0x7FF));
514 #endif
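/* Compute the 5-bit FPRF class/comparison code for FT0; optionally store it
 * into FPSCR[FPRF], and leave the 4-bit FPCC part in T0 for Rc updates. */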
516 void do_compute_fprf (int set_fprf)
518 int isneg;
520 isneg = fpisneg(FT0);
521 if (unlikely(float64_is_nan(FT0))) {
522 if (float64_is_signaling_nan(FT0)) {
523 /* Signaling NaN: flags are undefined */
524 T0 = 0x00;
525 } else {
526 /* Quiet NaN */
527 T0 = 0x11;
529 } else if (unlikely(isinfinity(FT0))) {
530 /* +/- infinity */
531 if (isneg)
532 T0 = 0x09;
533 else
534 T0 = 0x05;
535 } else {
536 if (iszero(FT0)) {
537 /* +/- zero */
538 if (isneg)
539 T0 = 0x12;
540 else
541 T0 = 0x02;
542 } else {
543 if (isden(FT0)) {
544 /* Denormalized numbers */
545 T0 = 0x10;
546 } else {
547 /* Normalized numbers */
548 T0 = 0x00;
550 if (isneg) {
551 T0 |= 0x08;
552 } else {
553 T0 |= 0x04;
557 if (set_fprf) {
558 /* We update FPSCR_FPRF */
559 env->fpscr &= ~(0x1F << FPSCR_FPRF);
560 env->fpscr |= T0 << FPSCR_FPRF;
562 /* We just need fpcc to update Rc1 */
563 T0 &= 0xF;
566 /* Floating-point invalid operations exception */
567 static always_inline void fload_invalid_op_excp (int op)
569 int ve;
571 ve = fpscr_ve;
572 if (op & POWERPC_EXCP_FP_VXSNAN) {
573 /* Operation on signaling NaN */
574 env->fpscr |= 1 << FPSCR_VXSNAN;
576 if (op & POWERPC_EXCP_FP_VXSOFT) {
577 /* Software-defined condition */
578 env->fpscr |= 1 << FPSCR_VXSOFT;
580 switch (op & ~(POWERPC_EXCP_FP_VXSOFT | POWERPC_EXCP_FP_VXSNAN)) {
581 case POWERPC_EXCP_FP_VXISI:
582 /* Magnitude subtraction of infinities */
583 env->fpscr |= 1 << FPSCR_VXISI;
584 goto update_arith;
585 case POWERPC_EXCP_FP_VXIDI:
586 /* Division of infinity by infinity */
587 env->fpscr |= 1 << FPSCR_VXIDI;
588 goto update_arith;
589 case POWERPC_EXCP_FP_VXZDZ:
590 /* Division of zero by zero */
591 env->fpscr |= 1 << FPSCR_VXZDZ;
592 goto update_arith;
593 case POWERPC_EXCP_FP_VXIMZ:
594 /* Multiplication of zero by infinity */
595 env->fpscr |= 1 << FPSCR_VXIMZ;
596 goto update_arith;
597 case POWERPC_EXCP_FP_VXVC:
598 /* Ordered comparison of NaN */
599 env->fpscr |= 1 << FPSCR_VXVC;
600 env->fpscr &= ~(0xF << FPSCR_FPCC);
601 env->fpscr |= 0x11 << FPSCR_FPCC;
602 /* We must update the target FPR before raising the exception */
603 if (ve != 0) {
604 env->exception_index = POWERPC_EXCP_PROGRAM;
605 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
606 /* Update the floating-point enabled exception summary */
607 env->fpscr |= 1 << FPSCR_FEX;
 608             /* Exception is deferred */
609 ve = 0;
611 break;
612 case POWERPC_EXCP_FP_VXSQRT:
613 /* Square root of a negative number */
614 env->fpscr |= 1 << FPSCR_VXSQRT;
615 update_arith:
616 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
617 if (ve == 0) {
618 /* Set the result to quiet NaN */
619 FT0 = UINT64_MAX;
620 env->fpscr &= ~(0xF << FPSCR_FPCC);
621 env->fpscr |= 0x11 << FPSCR_FPCC;
623 break;
624 case POWERPC_EXCP_FP_VXCVI:
625 /* Invalid conversion */
626 env->fpscr |= 1 << FPSCR_VXCVI;
627 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
628 if (ve == 0) {
629 /* Set the result to quiet NaN */
630 FT0 = UINT64_MAX;
631 env->fpscr &= ~(0xF << FPSCR_FPCC);
632 env->fpscr |= 0x11 << FPSCR_FPCC;
634 break;
636 /* Update the floating-point invalid operation summary */
637 env->fpscr |= 1 << FPSCR_VX;
638 /* Update the floating-point exception summary */
639 env->fpscr |= 1 << FPSCR_FX;
640 if (ve != 0) {
641 /* Update the floating-point enabled exception summary */
642 env->fpscr |= 1 << FPSCR_FEX;
643 if (msr_fe0 != 0 || msr_fe1 != 0)
644 do_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
648 static always_inline void float_zero_divide_excp (void)
650 CPU_DoubleU u0, u1;
652 env->fpscr |= 1 << FPSCR_ZX;
653 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
654 /* Update the floating-point exception summary */
655 env->fpscr |= 1 << FPSCR_FX;
656 if (fpscr_ze != 0) {
657 /* Update the floating-point enabled exception summary */
658 env->fpscr |= 1 << FPSCR_FEX;
659 if (msr_fe0 != 0 || msr_fe1 != 0) {
660 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
661 POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
663 } else {
664 /* Set the result to infinity */
665 u0.d = FT0;
666 u1.d = FT1;
667 u0.ll = ((u0.ll ^ u1.ll) & 0x8000000000000000ULL);
668 u0.ll |= 0x7FFULL << 52;
669 FT0 = u0.d;
673 static always_inline void float_overflow_excp (void)
675 env->fpscr |= 1 << FPSCR_OX;
676 /* Update the floating-point exception summary */
677 env->fpscr |= 1 << FPSCR_FX;
678 if (fpscr_oe != 0) {
679 /* XXX: should adjust the result */
680 /* Update the floating-point enabled exception summary */
681 env->fpscr |= 1 << FPSCR_FEX;
682 /* We must update the target FPR before raising the exception */
683 env->exception_index = POWERPC_EXCP_PROGRAM;
684 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
685 } else {
686 env->fpscr |= 1 << FPSCR_XX;
687 env->fpscr |= 1 << FPSCR_FI;
691 static always_inline void float_underflow_excp (void)
693 env->fpscr |= 1 << FPSCR_UX;
694 /* Update the floating-point exception summary */
695 env->fpscr |= 1 << FPSCR_FX;
696 if (fpscr_ue != 0) {
697 /* XXX: should adjust the result */
698 /* Update the floating-point enabled exception summary */
699 env->fpscr |= 1 << FPSCR_FEX;
700 /* We must update the target FPR before raising the exception */
701 env->exception_index = POWERPC_EXCP_PROGRAM;
702 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
706 static always_inline void float_inexact_excp (void)
708 env->fpscr |= 1 << FPSCR_XX;
709 /* Update the floating-point exception summary */
710 env->fpscr |= 1 << FPSCR_FX;
711 if (fpscr_xe != 0) {
712 /* Update the floating-point enabled exception summary */
713 env->fpscr |= 1 << FPSCR_FEX;
714 /* We must update the target FPR before raising the exception */
715 env->exception_index = POWERPC_EXCP_PROGRAM;
716 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
720 static always_inline void fpscr_set_rounding_mode (void)
722 int rnd_type;
724 /* Set rounding mode */
725 switch (fpscr_rn) {
726 case 0:
727 /* Best approximation (round to nearest) */
728 rnd_type = float_round_nearest_even;
729 break;
730 case 1:
731 /* Smaller magnitude (round toward zero) */
732 rnd_type = float_round_to_zero;
733 break;
734 case 2:
 735         /* Round toward +infinity */
736 rnd_type = float_round_up;
737 break;
738 default:
739 case 3:
 740         /* Round toward -infinity */
741 rnd_type = float_round_down;
742 break;
744 set_float_rounding_mode(rnd_type, &env->fp_status);
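/* Set a single FPSCR bit; update the FX/VX/FEX summary bits and queue a
 * program exception when an enabled exception becomes active. */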
747 void do_fpscr_setbit (int bit)
749 int prev;
751 prev = (env->fpscr >> bit) & 1;
752 env->fpscr |= 1 << bit;
753 if (prev == 0) {
754 switch (bit) {
755 case FPSCR_VX:
756 env->fpscr |= 1 << FPSCR_FX;
757 if (fpscr_ve)
758 goto raise_ve;
759 case FPSCR_OX:
760 env->fpscr |= 1 << FPSCR_FX;
761 if (fpscr_oe)
762 goto raise_oe;
763 break;
764 case FPSCR_UX:
765 env->fpscr |= 1 << FPSCR_FX;
766 if (fpscr_ue)
767 goto raise_ue;
768 break;
769 case FPSCR_ZX:
770 env->fpscr |= 1 << FPSCR_FX;
771 if (fpscr_ze)
772 goto raise_ze;
773 break;
774 case FPSCR_XX:
775 env->fpscr |= 1 << FPSCR_FX;
776 if (fpscr_xe)
777 goto raise_xe;
778 break;
779 case FPSCR_VXSNAN:
780 case FPSCR_VXISI:
781 case FPSCR_VXIDI:
782 case FPSCR_VXZDZ:
783 case FPSCR_VXIMZ:
784 case FPSCR_VXVC:
785 case FPSCR_VXSOFT:
786 case FPSCR_VXSQRT:
787 case FPSCR_VXCVI:
788 env->fpscr |= 1 << FPSCR_VX;
789 env->fpscr |= 1 << FPSCR_FX;
790 if (fpscr_ve != 0)
791 goto raise_ve;
792 break;
793 case FPSCR_VE:
794 if (fpscr_vx != 0) {
795 raise_ve:
796 env->error_code = POWERPC_EXCP_FP;
797 if (fpscr_vxsnan)
798 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
799 if (fpscr_vxisi)
800 env->error_code |= POWERPC_EXCP_FP_VXISI;
801 if (fpscr_vxidi)
802 env->error_code |= POWERPC_EXCP_FP_VXIDI;
803 if (fpscr_vxzdz)
804 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
805 if (fpscr_vximz)
806 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
807 if (fpscr_vxvc)
808 env->error_code |= POWERPC_EXCP_FP_VXVC;
809 if (fpscr_vxsoft)
810 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
811 if (fpscr_vxsqrt)
812 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
813 if (fpscr_vxcvi)
814 env->error_code |= POWERPC_EXCP_FP_VXCVI;
815 goto raise_excp;
817 break;
818 case FPSCR_OE:
819 if (fpscr_ox != 0) {
820 raise_oe:
821 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
822 goto raise_excp;
824 break;
825 case FPSCR_UE:
826 if (fpscr_ux != 0) {
827 raise_ue:
828 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
829 goto raise_excp;
831 break;
832 case FPSCR_ZE:
833 if (fpscr_zx != 0) {
834 raise_ze:
835 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
836 goto raise_excp;
838 break;
839 case FPSCR_XE:
840 if (fpscr_xx != 0) {
841 raise_xe:
842 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
843 goto raise_excp;
845 break;
846 case FPSCR_RN1:
847 case FPSCR_RN:
848 fpscr_set_rounding_mode();
849 break;
850 default:
851 break;
852 raise_excp:
853 /* Update the floating-point enabled exception summary */
854 env->fpscr |= 1 << FPSCR_FEX;
855 /* We have to update Rc1 before raising the exception */
856 env->exception_index = POWERPC_EXCP_PROGRAM;
857 break;
862 #if defined(WORDS_BIGENDIAN)
863 #define WORD0 0
864 #define WORD1 1
865 #else
866 #define WORD0 1
867 #define WORD1 0
868 #endif
869 void do_store_fpscr (uint32_t mask)
872 * We use only the 32 LSB of the incoming fpr
874 CPU_DoubleU u;
875 uint32_t prev, new;
876 int i;
878 u.d = FT0;
879 prev = env->fpscr;
880 new = u.l.lower;
881 new &= ~0x90000000;
882 new |= prev & 0x90000000;
883 for (i = 0; i < 7; i++) {
884 if (mask & (1 << i)) {
885 env->fpscr &= ~(0xF << (4 * i));
886 env->fpscr |= new & (0xF << (4 * i));
889 /* Update VX and FEX */
890 if (fpscr_ix != 0)
891 env->fpscr |= 1 << FPSCR_VX;
892 else
893 env->fpscr &= ~(1 << FPSCR_VX);
894 if ((fpscr_ex & fpscr_eex) != 0) {
895 env->fpscr |= 1 << FPSCR_FEX;
896 env->exception_index = POWERPC_EXCP_PROGRAM;
897 /* XXX: we should compute it properly */
898 env->error_code = POWERPC_EXCP_FP;
900 else
901 env->fpscr &= ~(1 << FPSCR_FEX);
902 fpscr_set_rounding_mode();
904 #undef WORD0
905 #undef WORD1
907 #ifdef CONFIG_SOFTFLOAT
908 void do_float_check_status (void)
910 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
911 (env->error_code & POWERPC_EXCP_FP)) {
 912         /* Deferred floating-point exception after target FPR update */
913 if (msr_fe0 != 0 || msr_fe1 != 0)
914 do_raise_exception_err(env->exception_index, env->error_code);
915 } else if (env->fp_status.float_exception_flags & float_flag_overflow) {
916 float_overflow_excp();
917 } else if (env->fp_status.float_exception_flags & float_flag_underflow) {
918 float_underflow_excp();
919 } else if (env->fp_status.float_exception_flags & float_flag_inexact) {
920 float_inexact_excp();
923 #endif
925 #if USE_PRECISE_EMULATION
926 void do_fadd (void)
928 if (unlikely(float64_is_signaling_nan(FT0) ||
929 float64_is_signaling_nan(FT1))) {
930 /* sNaN addition */
931 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
932 } else if (likely(isfinite(FT0) || isfinite(FT1) ||
933 fpisneg(FT0) == fpisneg(FT1))) {
934 FT0 = float64_add(FT0, FT1, &env->fp_status);
935 } else {
936 /* Magnitude subtraction of infinities */
937 fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
941 void do_fsub (void)
943 if (unlikely(float64_is_signaling_nan(FT0) ||
944 float64_is_signaling_nan(FT1))) {
945 /* sNaN subtraction */
946 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
947 } else if (likely(isfinite(FT0) || isfinite(FT1) ||
948 fpisneg(FT0) != fpisneg(FT1))) {
949 FT0 = float64_sub(FT0, FT1, &env->fp_status);
950 } else {
951 /* Magnitude subtraction of infinities */
952 fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
956 void do_fmul (void)
958 if (unlikely(float64_is_signaling_nan(FT0) ||
959 float64_is_signaling_nan(FT1))) {
960 /* sNaN multiplication */
961 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
962 } else if (unlikely((isinfinity(FT0) && iszero(FT1)) ||
963 (iszero(FT0) && isinfinity(FT1)))) {
964 /* Multiplication of zero by infinity */
965 fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
966 } else {
967 FT0 = float64_mul(FT0, FT1, &env->fp_status);
971 void do_fdiv (void)
973 if (unlikely(float64_is_signaling_nan(FT0) ||
974 float64_is_signaling_nan(FT1))) {
975 /* sNaN division */
976 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
977 } else if (unlikely(isinfinity(FT0) && isinfinity(FT1))) {
978 /* Division of infinity by infinity */
979 fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
980 } else if (unlikely(iszero(FT1))) {
981 if (iszero(FT0)) {
982 /* Division of zero by zero */
983 fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
984 } else {
985 /* Division by zero */
986 float_zero_divide_excp();
988 } else {
989 FT0 = float64_div(FT0, FT1, &env->fp_status);
992 #endif /* USE_PRECISE_EMULATION */
994 void do_fctiw (void)
996 CPU_DoubleU p;
998 if (unlikely(float64_is_signaling_nan(FT0))) {
999 /* sNaN conversion */
1000 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1001 } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
 1002         /* qNaN / infinity conversion */
1003 fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1004 } else {
1005 p.ll = float64_to_int32(FT0, &env->fp_status);
1006 #if USE_PRECISE_EMULATION
1007 /* XXX: higher bits are not supposed to be significant.
1008 * to make tests easier, return the same as a real PowerPC 750
1010 p.ll |= 0xFFF80000ULL << 32;
1011 #endif
1012 FT0 = p.d;
1016 void do_fctiwz (void)
1018 CPU_DoubleU p;
1020 if (unlikely(float64_is_signaling_nan(FT0))) {
1021 /* sNaN conversion */
1022 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1023 } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
1024 /* qNan / infinity conversion */
1025 fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1026 } else {
1027 p.ll = float64_to_int32_round_to_zero(FT0, &env->fp_status);
1028 #if USE_PRECISE_EMULATION
1029 /* XXX: higher bits are not supposed to be significant.
1030 * to make tests easier, return the same as a real PowerPC 750
1032 p.ll |= 0xFFF80000ULL << 32;
1033 #endif
1034 FT0 = p.d;
1038 #if defined(TARGET_PPC64)
1039 void do_fcfid (void)
1041 CPU_DoubleU p;
1043 p.d = FT0;
1044 FT0 = int64_to_float64(p.ll, &env->fp_status);
1047 void do_fctid (void)
1049 CPU_DoubleU p;
1051 if (unlikely(float64_is_signaling_nan(FT0))) {
1052 /* sNaN conversion */
1053 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1054 } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
 1055         /* qNaN / infinity conversion */
1056 fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1057 } else {
1058 p.ll = float64_to_int64(FT0, &env->fp_status);
1059 FT0 = p.d;
1063 void do_fctidz (void)
1065 CPU_DoubleU p;
1067 if (unlikely(float64_is_signaling_nan(FT0))) {
1068 /* sNaN conversion */
1069 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1070 } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
 1071         /* qNaN / infinity conversion */
1072 fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1073 } else {
1074 p.ll = float64_to_int64_round_to_zero(FT0, &env->fp_status);
1075 FT0 = p.d;
1079 #endif
1081 static always_inline void do_fri (int rounding_mode)
1083 if (unlikely(float64_is_signaling_nan(FT0))) {
1084 /* sNaN round */
1085 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1086 } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
 1087         /* qNaN / infinity round */
1088 fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1089 } else {
1090 set_float_rounding_mode(rounding_mode, &env->fp_status);
1091 FT0 = float64_round_to_int(FT0, &env->fp_status);
1092 /* Restore rounding mode from FPSCR */
1093 fpscr_set_rounding_mode();
1097 void do_frin (void)
1099 do_fri(float_round_nearest_even);
1102 void do_friz (void)
1104 do_fri(float_round_to_zero);
1107 void do_frip (void)
1109 do_fri(float_round_up);
1112 void do_frim (void)
1114 do_fri(float_round_down);
1117 #if USE_PRECISE_EMULATION
1118 void do_fmadd (void)
1120 if (unlikely(float64_is_signaling_nan(FT0) ||
1121 float64_is_signaling_nan(FT1) ||
1122 float64_is_signaling_nan(FT2))) {
1123 /* sNaN operation */
1124 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1125 } else {
1126 #ifdef FLOAT128
1127 /* This is the way the PowerPC specification defines it */
1128 float128 ft0_128, ft1_128;
1130 ft0_128 = float64_to_float128(FT0, &env->fp_status);
1131 ft1_128 = float64_to_float128(FT1, &env->fp_status);
1132 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1133 ft1_128 = float64_to_float128(FT2, &env->fp_status);
1134 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1135 FT0 = float128_to_float64(ft0_128, &env->fp_status);
1136 #else
1137 /* This is OK on x86 hosts */
1138 FT0 = (FT0 * FT1) + FT2;
1139 #endif
1143 void do_fmsub (void)
1145 if (unlikely(float64_is_signaling_nan(FT0) ||
1146 float64_is_signaling_nan(FT1) ||
1147 float64_is_signaling_nan(FT2))) {
1148 /* sNaN operation */
1149 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1150 } else {
1151 #ifdef FLOAT128
1152 /* This is the way the PowerPC specification defines it */
1153 float128 ft0_128, ft1_128;
1155 ft0_128 = float64_to_float128(FT0, &env->fp_status);
1156 ft1_128 = float64_to_float128(FT1, &env->fp_status);
1157 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1158 ft1_128 = float64_to_float128(FT2, &env->fp_status);
1159 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1160 FT0 = float128_to_float64(ft0_128, &env->fp_status);
1161 #else
1162 /* This is OK on x86 hosts */
1163 FT0 = (FT0 * FT1) - FT2;
1164 #endif
1167 #endif /* USE_PRECISE_EMULATION */
1169 void do_fnmadd (void)
1171 if (unlikely(float64_is_signaling_nan(FT0) ||
1172 float64_is_signaling_nan(FT1) ||
1173 float64_is_signaling_nan(FT2))) {
1174 /* sNaN operation */
1175 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1176 } else {
1177 #if USE_PRECISE_EMULATION
1178 #ifdef FLOAT128
1179 /* This is the way the PowerPC specification defines it */
1180 float128 ft0_128, ft1_128;
1182 ft0_128 = float64_to_float128(FT0, &env->fp_status);
1183 ft1_128 = float64_to_float128(FT1, &env->fp_status);
1184 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1185 ft1_128 = float64_to_float128(FT2, &env->fp_status);
1186 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1187 FT0 = float128_to_float64(ft0_128, &env->fp_status);
1188 #else
1189 /* This is OK on x86 hosts */
1190 FT0 = (FT0 * FT1) + FT2;
1191 #endif
1192 #else
1193 FT0 = float64_mul(FT0, FT1, &env->fp_status);
1194 FT0 = float64_add(FT0, FT2, &env->fp_status);
1195 #endif
1196 if (likely(!isnan(FT0)))
1197 FT0 = float64_chs(FT0);
1201 void do_fnmsub (void)
1203 if (unlikely(float64_is_signaling_nan(FT0) ||
1204 float64_is_signaling_nan(FT1) ||
1205 float64_is_signaling_nan(FT2))) {
1206 /* sNaN operation */
1207 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1208 } else {
1209 #if USE_PRECISE_EMULATION
1210 #ifdef FLOAT128
1211 /* This is the way the PowerPC specification defines it */
1212 float128 ft0_128, ft1_128;
1214 ft0_128 = float64_to_float128(FT0, &env->fp_status);
1215 ft1_128 = float64_to_float128(FT1, &env->fp_status);
1216 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1217 ft1_128 = float64_to_float128(FT2, &env->fp_status);
1218 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1219 FT0 = float128_to_float64(ft0_128, &env->fp_status);
1220 #else
1221 /* This is OK on x86 hosts */
1222 FT0 = (FT0 * FT1) - FT2;
1223 #endif
1224 #else
1225 FT0 = float64_mul(FT0, FT1, &env->fp_status);
1226 FT0 = float64_sub(FT0, FT2, &env->fp_status);
1227 #endif
1228 if (likely(!isnan(FT0)))
1229 FT0 = float64_chs(FT0);
1233 #if USE_PRECISE_EMULATION
1234 void do_frsp (void)
1236 if (unlikely(float64_is_signaling_nan(FT0))) {
 1237         /* sNaN round to single precision */
1238 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1239 } else {
1240 FT0 = float64_to_float32(FT0, &env->fp_status);
1243 #endif /* USE_PRECISE_EMULATION */
1245 void do_fsqrt (void)
1247 if (unlikely(float64_is_signaling_nan(FT0))) {
1248 /* sNaN square root */
1249 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1250 } else if (unlikely(fpisneg(FT0) && !iszero(FT0))) {
1251 /* Square root of a negative nonzero number */
1252 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1253 } else {
1254 FT0 = float64_sqrt(FT0, &env->fp_status);
1258 void do_fre (void)
1260 CPU_DoubleU p;
1262 if (unlikely(float64_is_signaling_nan(FT0))) {
1263 /* sNaN reciprocal */
1264 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1265 } else if (unlikely(iszero(FT0))) {
1266 /* Zero reciprocal */
1267 float_zero_divide_excp();
1268 } else if (likely(isnormal(FT0))) {
1269 FT0 = float64_div(1.0, FT0, &env->fp_status);
1270 } else {
1271 p.d = FT0;
1272 if (p.ll == 0x8000000000000000ULL) {
1273 p.ll = 0xFFF0000000000000ULL;
1274 } else if (p.ll == 0x0000000000000000ULL) {
1275 p.ll = 0x7FF0000000000000ULL;
1276 } else if (isnan(FT0)) {
1277 p.ll = 0x7FF8000000000000ULL;
1278 } else if (fpisneg(FT0)) {
1279 p.ll = 0x8000000000000000ULL;
1280 } else {
1281 p.ll = 0x0000000000000000ULL;
1283 FT0 = p.d;
1287 void do_fres (void)
1289 CPU_DoubleU p;
1291 if (unlikely(float64_is_signaling_nan(FT0))) {
1292 /* sNaN reciprocal */
1293 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1294 } else if (unlikely(iszero(FT0))) {
1295 /* Zero reciprocal */
1296 float_zero_divide_excp();
1297 } else if (likely(isnormal(FT0))) {
1298 #if USE_PRECISE_EMULATION
1299 FT0 = float64_div(1.0, FT0, &env->fp_status);
1300 FT0 = float64_to_float32(FT0, &env->fp_status);
1301 #else
1302 FT0 = float32_div(1.0, FT0, &env->fp_status);
1303 #endif
1304 } else {
1305 p.d = FT0;
1306 if (p.ll == 0x8000000000000000ULL) {
1307 p.ll = 0xFFF0000000000000ULL;
1308 } else if (p.ll == 0x0000000000000000ULL) {
1309 p.ll = 0x7FF0000000000000ULL;
1310 } else if (isnan(FT0)) {
1311 p.ll = 0x7FF8000000000000ULL;
1312 } else if (fpisneg(FT0)) {
1313 p.ll = 0x8000000000000000ULL;
1314 } else {
1315 p.ll = 0x0000000000000000ULL;
1317 FT0 = p.d;
1321 void do_frsqrte (void)
1323 CPU_DoubleU p;
1325 if (unlikely(float64_is_signaling_nan(FT0))) {
1326 /* sNaN reciprocal square root */
1327 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1328 } else if (unlikely(fpisneg(FT0) && !iszero(FT0))) {
1329 /* Reciprocal square root of a negative nonzero number */
1330 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1331 } else if (likely(isnormal(FT0))) {
1332 FT0 = float64_sqrt(FT0, &env->fp_status);
1333 FT0 = float32_div(1.0, FT0, &env->fp_status);
1334 } else {
1335 p.d = FT0;
1336 if (p.ll == 0x8000000000000000ULL) {
1337 p.ll = 0xFFF0000000000000ULL;
1338 } else if (p.ll == 0x0000000000000000ULL) {
1339 p.ll = 0x7FF0000000000000ULL;
1340 } else if (isnan(FT0)) {
1341 p.ll |= 0x000FFFFFFFFFFFFFULL;
1342 } else if (fpisneg(FT0)) {
1343 p.ll = 0x7FF8000000000000ULL;
1344 } else {
1345 p.ll = 0x0000000000000000ULL;
1347 FT0 = p.d;
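/* fsel: select FT1 when FT0 is non-negative or zero (including -0.0),
 * FT2 otherwise. */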
1351 void do_fsel (void)
1353 if (!fpisneg(FT0) || iszero(FT0))
1354 FT0 = FT1;
1355 else
1356 FT0 = FT2;
1359 void do_fcmpu (void)
1361 if (unlikely(float64_is_signaling_nan(FT0) ||
1362 float64_is_signaling_nan(FT1))) {
1363 /* sNaN comparison */
1364 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1365 } else {
1366 if (float64_lt(FT0, FT1, &env->fp_status)) {
1367 T0 = 0x08UL;
1368 } else if (!float64_le(FT0, FT1, &env->fp_status)) {
1369 T0 = 0x04UL;
1370 } else {
1371 T0 = 0x02UL;
1374 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1375 env->fpscr |= T0 << FPSCR_FPRF;
1378 void do_fcmpo (void)
1380 if (unlikely(float64_is_nan(FT0) ||
1381 float64_is_nan(FT1))) {
1382 if (float64_is_signaling_nan(FT0) ||
1383 float64_is_signaling_nan(FT1)) {
1384 /* sNaN comparison */
1385 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1386 POWERPC_EXCP_FP_VXVC);
1387 } else {
1388 /* qNaN comparison */
1389 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1391 } else {
1392 if (float64_lt(FT0, FT1, &env->fp_status)) {
1393 T0 = 0x08UL;
1394 } else if (!float64_le(FT0, FT1, &env->fp_status)) {
1395 T0 = 0x04UL;
1396 } else {
1397 T0 = 0x02UL;
1400 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1401 env->fpscr |= T0 << FPSCR_FPRF;
1404 #if !defined (CONFIG_USER_ONLY)
1405 void cpu_dump_rfi (target_ulong RA, target_ulong msr);
1407 void do_store_msr (void)
1409 T0 = hreg_store_msr(env, T0, 0);
1410 if (T0 != 0) {
1411 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1412 do_raise_exception(T0);
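/* Common return-from-interrupt helper: reload NIP and MSR (masked by msrm)
 * and force an exit from the current translation block. */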
1416 static always_inline void __do_rfi (target_ulong nip, target_ulong msr,
1417 target_ulong msrm, int keep_msrh)
1419 #if defined(TARGET_PPC64)
1420 if (msr & (1ULL << MSR_SF)) {
1421 nip = (uint64_t)nip;
1422 msr &= (uint64_t)msrm;
1423 } else {
1424 nip = (uint32_t)nip;
1425 msr = (uint32_t)(msr & msrm);
1426 if (keep_msrh)
1427 msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1429 #else
1430 nip = (uint32_t)nip;
1431 msr &= (uint32_t)msrm;
1432 #endif
1433 /* XXX: beware: this is false if VLE is supported */
1434 env->nip = nip & ~((target_ulong)0x00000003);
1435 hreg_store_msr(env, msr, 1);
1436 #if defined (DEBUG_OP)
1437 cpu_dump_rfi(env->nip, env->msr);
1438 #endif
1439 /* No need to raise an exception here,
1440 * as rfi is always the last insn of a TB
1442 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1445 void do_rfi (void)
1447 __do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1448 ~((target_ulong)0xFFFF0000), 1);
1451 #if defined(TARGET_PPC64)
1452 void do_rfid (void)
1454 __do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1455 ~((target_ulong)0xFFFF0000), 0);
1458 void do_hrfid (void)
1460 __do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1461 ~((target_ulong)0xFFFF0000), 0);
1463 #endif
1464 #endif
1466 void do_tw (int flags)
1468 if (!likely(!(((int32_t)T0 < (int32_t)T1 && (flags & 0x10)) ||
1469 ((int32_t)T0 > (int32_t)T1 && (flags & 0x08)) ||
1470 ((int32_t)T0 == (int32_t)T1 && (flags & 0x04)) ||
1471 ((uint32_t)T0 < (uint32_t)T1 && (flags & 0x02)) ||
1472 ((uint32_t)T0 > (uint32_t)T1 && (flags & 0x01))))) {
1473 do_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1477 #if defined(TARGET_PPC64)
1478 void do_td (int flags)
1480 if (!likely(!(((int64_t)T0 < (int64_t)T1 && (flags & 0x10)) ||
1481 ((int64_t)T0 > (int64_t)T1 && (flags & 0x08)) ||
1482 ((int64_t)T0 == (int64_t)T1 && (flags & 0x04)) ||
1483 ((uint64_t)T0 < (uint64_t)T1 && (flags & 0x02)) ||
1484 ((uint64_t)T0 > (uint64_t)T1 && (flags & 0x01)))))
1485 do_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1487 #endif
1489 /*****************************************************************************/
1490 /* PowerPC 601 specific instructions (POWER bridge) */
1491 void do_POWER_abso (void)
1493 if ((int32_t)T0 == INT32_MIN) {
1494 T0 = INT32_MAX;
1495 xer_ov = 1;
1496 } else if ((int32_t)T0 < 0) {
1497 T0 = -T0;
1498 xer_ov = 0;
1499 } else {
1500 xer_ov = 0;
1502 xer_so |= xer_ov;
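/* clcs: return in T0 the cache geometry value selected by T0: instruction,
 * data, minimum or maximum cache line size. */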
1505 void do_POWER_clcs (void)
1507 switch (T0) {
1508 case 0x0CUL:
1509 /* Instruction cache line size */
1510 T0 = env->icache_line_size;
1511 break;
1512 case 0x0DUL:
1513 /* Data cache line size */
1514 T0 = env->dcache_line_size;
1515 break;
1516 case 0x0EUL:
1517 /* Minimum cache line size */
1518 T0 = env->icache_line_size < env->dcache_line_size ?
1519 env->icache_line_size : env->dcache_line_size;
1520 break;
1521 case 0x0FUL:
1522 /* Maximum cache line size */
1523 T0 = env->icache_line_size > env->dcache_line_size ?
1524 env->icache_line_size : env->dcache_line_size;
1525 break;
1526 default:
1527 /* Undefined */
1528 break;
1532 void do_POWER_div (void)
1534 uint64_t tmp;
1536 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1537 (int32_t)T1 == 0) {
1538 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1539 env->spr[SPR_MQ] = 0;
1540 } else {
1541 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1542 env->spr[SPR_MQ] = tmp % T1;
1543 T0 = tmp / (int32_t)T1;
1547 void do_POWER_divo (void)
1549 int64_t tmp;
1551 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1552 (int32_t)T1 == 0) {
1553 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1554 env->spr[SPR_MQ] = 0;
1555 xer_ov = 1;
1556 } else {
1557 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1558 env->spr[SPR_MQ] = tmp % T1;
1559 tmp /= (int32_t)T1;
1560 if (tmp > (int64_t)INT32_MAX || tmp < (int64_t)INT32_MIN) {
1561 xer_ov = 1;
1562 } else {
1563 xer_ov = 0;
1565 T0 = tmp;
1567 xer_so |= xer_ov;
1570 void do_POWER_divs (void)
1572 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1573 (int32_t)T1 == 0) {
1574 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1575 env->spr[SPR_MQ] = 0;
1576 } else {
1577 env->spr[SPR_MQ] = T0 % T1;
1578 T0 = (int32_t)T0 / (int32_t)T1;
1582 void do_POWER_divso (void)
1584 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1585 (int32_t)T1 == 0) {
1586 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1587 env->spr[SPR_MQ] = 0;
1588 xer_ov = 1;
1589 } else {
1590 T0 = (int32_t)T0 / (int32_t)T1;
1591 env->spr[SPR_MQ] = (int32_t)T0 % (int32_t)T1;
1592 xer_ov = 0;
1594 xer_so |= xer_ov;
1597 void do_POWER_dozo (void)
1599 if ((int32_t)T1 > (int32_t)T0) {
1600 T2 = T0;
1601 T0 = T1 - T0;
1602 if (((uint32_t)(~T2) ^ (uint32_t)T1 ^ UINT32_MAX) &
1603 ((uint32_t)(~T2) ^ (uint32_t)T0) & (1UL << 31)) {
1604 xer_ov = 1;
1605 xer_so = 1;
1606 } else {
1607 xer_ov = 0;
1609 } else {
1610 T0 = 0;
1611 xer_ov = 0;
1615 void do_POWER_maskg (void)
1617 uint32_t ret;
1619 if ((uint32_t)T0 == (uint32_t)(T1 + 1)) {
1620 ret = UINT32_MAX;
1621 } else {
1622 ret = (UINT32_MAX >> ((uint32_t)T0)) ^
1623 ((UINT32_MAX >> ((uint32_t)T1)) >> 1);
1624 if ((uint32_t)T0 > (uint32_t)T1)
1625 ret = ~ret;
1627 T0 = ret;
1630 void do_POWER_mulo (void)
1632 uint64_t tmp;
1634 tmp = (uint64_t)T0 * (uint64_t)T1;
1635 env->spr[SPR_MQ] = tmp >> 32;
1636 T0 = tmp;
1637 if (tmp >> 32 != ((uint64_t)T0 >> 16) * ((uint64_t)T1 >> 16)) {
1638 xer_ov = 1;
1639 xer_so = 1;
1640 } else {
1641 xer_ov = 0;
1645 #if !defined (CONFIG_USER_ONLY)
1646 void do_POWER_rac (void)
1648 mmu_ctx_t ctx;
1649 int nb_BATs;
1651 /* We don't have to generate many instances of this instruction,
1652 * as rac is supervisor only.
1654 /* XXX: FIX THIS: Pretend we have no BAT */
1655 nb_BATs = env->nb_BATs;
1656 env->nb_BATs = 0;
1657 if (get_physical_address(env, &ctx, T0, 0, ACCESS_INT) == 0)
1658 T0 = ctx.raddr;
1659 env->nb_BATs = nb_BATs;
1662 void do_POWER_rfsvc (void)
1664 __do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1667 void do_store_hid0_601 (void)
1669 uint32_t hid0;
1671 hid0 = env->spr[SPR_HID0];
1672 if ((T0 ^ hid0) & 0x00000008) {
1673 /* Change current endianness */
1674 env->hflags &= ~(1 << MSR_LE);
1675 env->hflags_nmsr &= ~(1 << MSR_LE);
1676 env->hflags_nmsr |= (1 << MSR_LE) & (((T0 >> 3) & 1) << MSR_LE);
1677 env->hflags |= env->hflags_nmsr;
1678 if (loglevel != 0) {
1679 fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
1680 __func__, T0 & 0x8 ? 'l' : 'b', env->hflags);
1683 env->spr[SPR_HID0] = T0;
1685 #endif
1687 /*****************************************************************************/
1688 /* 602 specific instructions */
1689 /* mfrom is the most crazy instruction ever seen, imho ! */
1690 /* Real implementation uses a ROM table. Do the same */
1691 #define USE_MFROM_ROM_TABLE
1692 void do_op_602_mfrom (void)
1694 if (likely(T0 < 602)) {
1695 #if defined(USE_MFROM_ROM_TABLE)
1696 #include "mfrom_table.c"
1697 T0 = mfrom_ROM_table[T0];
1698 #else
1699 double d;
 1700         /* Extremely decomposed:
 1701          * T0 = 256 * log10(10^(-T0 / 256) + 1.0) + 0.5
 1702          */
1704 d = T0;
1705 d = float64_div(d, 256, &env->fp_status);
1706 d = float64_chs(d);
1707 d = exp10(d); // XXX: use float emulation function
1708 d = float64_add(d, 1.0, &env->fp_status);
1709 d = log10(d); // XXX: use float emulation function
1710 d = float64_mul(d, 256, &env->fp_status);
1711 d = float64_add(d, 0.5, &env->fp_status);
1712 T0 = float64_round_to_int(d, &env->fp_status);
1713 #endif
1714 } else {
1715 T0 = 0;
1719 /*****************************************************************************/
1720 /* Embedded PowerPC specific helpers */
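/* Saturate a 405 MAC result: when both source operands (T1, T2) have the same
 * sign and the result T0 has the opposite sign, clamp T0 to INT32_MIN or
 * INT32_MAX. */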
1721 void do_405_check_sat (void)
1723 if (!likely((((uint32_t)T1 ^ (uint32_t)T2) >> 31) ||
1724 !(((uint32_t)T0 ^ (uint32_t)T2) >> 31))) {
1725 /* Saturate result */
1726 if (T2 >> 31) {
1727 T0 = INT32_MIN;
1728 } else {
1729 T0 = INT32_MAX;
1734 /* XXX: to be improved to check access rights when in user-mode */
1735 void do_load_dcr (void)
1737 target_ulong val;
1739 if (unlikely(env->dcr_env == NULL)) {
1740 if (loglevel != 0) {
1741 fprintf(logfile, "No DCR environment\n");
1743 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1744 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1745 } else if (unlikely(ppc_dcr_read(env->dcr_env, T0, &val) != 0)) {
1746 if (loglevel != 0) {
1747 fprintf(logfile, "DCR read error %d %03x\n", (int)T0, (int)T0);
1749 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1750 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1751 } else {
1752 T0 = val;
1756 void do_store_dcr (void)
1758 if (unlikely(env->dcr_env == NULL)) {
1759 if (loglevel != 0) {
1760 fprintf(logfile, "No DCR environment\n");
1762 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1763 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1764 } else if (unlikely(ppc_dcr_write(env->dcr_env, T0, T1) != 0)) {
1765 if (loglevel != 0) {
1766 fprintf(logfile, "DCR write error %d %03x\n", (int)T0, (int)T0);
1768 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1769 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1773 #if !defined(CONFIG_USER_ONLY)
1774 void do_40x_rfci (void)
1776 __do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1777 ~((target_ulong)0xFFFF0000), 0);
1780 void do_rfci (void)
 1782     __do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
1783 ~((target_ulong)0x3FFF0000), 0);
1786 void do_rfdi (void)
 1788     __do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
1789 ~((target_ulong)0x3FFF0000), 0);
1792 void do_rfmci (void)
 1794     __do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
1795 ~((target_ulong)0x3FFF0000), 0);
1798 void do_load_403_pb (int num)
1800 T0 = env->pb[num];
1803 void do_store_403_pb (int num)
1805 if (likely(env->pb[num] != T0)) {
1806 env->pb[num] = T0;
1807 /* Should be optimized */
1808 tlb_flush(env, 1);
1811 #endif
1813 /* 440 specific */
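/* dlmzb: scan the 8 bytes of T0:T1 starting from the most significant byte of
 * T0 and return in T0 the 1-based position of the first zero byte (9 if none
 * is found). */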
1814 void do_440_dlmzb (void)
1816 target_ulong mask;
1817 int i;
1819 i = 1;
1820 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1821 if ((T0 & mask) == 0)
1822 goto done;
1823 i++;
1825 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1826 if ((T1 & mask) == 0)
1827 break;
1828 i++;
1830 done:
1831 T0 = i;
1834 /* SPE extension helpers */
1835 /* Use a table to make this quicker */
1836 static uint8_t hbrev[16] = {
1837 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
1838 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
1841 static always_inline uint8_t byte_reverse (uint8_t val)
1843 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
1846 static always_inline uint32_t word_reverse (uint32_t val)
1848 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
1849 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
 1852 #define MASKBITS 16 // Random value - to be fixed (implementation dependent)
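/* brinc: bit-reversed increment, e.g. for FFT-style buffers; the low MASKBITS
 * bits of T0 are incremented in bit-reversed order under the mask given by T1. */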
1853 void do_brinc (void)
1855 uint32_t a, b, d, mask;
1857 mask = UINT32_MAX >> (32 - MASKBITS);
1858 a = T0 & mask;
1859 b = T1 & mask;
1860 d = word_reverse(1 + word_reverse(a | ~b));
1861 T0 = (T0 & ~mask) | (d & b);
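/* The DO_SPE_OP1/DO_SPE_OP2 macros apply a 32-bit helper independently to the
 * upper and lower halves of the 64-bit T0_64 (and T1_64) SPE registers. */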
1864 #define DO_SPE_OP2(name) \
1865 void do_ev##name (void) \
1867 T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32, T1_64 >> 32) << 32) | \
1868 (uint64_t)_do_e##name(T0_64, T1_64); \
1871 #define DO_SPE_OP1(name) \
1872 void do_ev##name (void) \
1874 T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32) << 32) | \
1875 (uint64_t)_do_e##name(T0_64); \
1878 /* Fixed-point vector arithmetic */
1879 static always_inline uint32_t _do_eabs (uint32_t val)
1881 if ((val & 0x80000000) && val != 0x80000000)
 1882         val = -val;
1884 return val;
1887 static always_inline uint32_t _do_eaddw (uint32_t op1, uint32_t op2)
1889 return op1 + op2;
1892 static always_inline int _do_ecntlsw (uint32_t val)
1894 if (val & 0x80000000)
1895 return clz32(~val);
1896 else
1897 return clz32(val);
1900 static always_inline int _do_ecntlzw (uint32_t val)
1902 return clz32(val);
1905 static always_inline uint32_t _do_eneg (uint32_t val)
1907 if (val != 0x80000000)
 1908         val = -val;
1910 return val;
1913 static always_inline uint32_t _do_erlw (uint32_t op1, uint32_t op2)
1915 return rotl32(op1, op2);
1918 static always_inline uint32_t _do_erndw (uint32_t val)
 1920     return (val + 0x00008000) & 0xFFFF0000;
1923 static always_inline uint32_t _do_eslw (uint32_t op1, uint32_t op2)
1925 /* No error here: 6 bits are used */
1926 return op1 << (op2 & 0x3F);
1929 static always_inline int32_t _do_esrws (int32_t op1, uint32_t op2)
1931 /* No error here: 6 bits are used */
1932 return op1 >> (op2 & 0x3F);
1935 static always_inline uint32_t _do_esrwu (uint32_t op1, uint32_t op2)
1937 /* No error here: 6 bits are used */
1938 return op1 >> (op2 & 0x3F);
1941 static always_inline uint32_t _do_esubfw (uint32_t op1, uint32_t op2)
1943 return op2 - op1;
1946 /* evabs */
1947 DO_SPE_OP1(abs);
1948 /* evaddw */
1949 DO_SPE_OP2(addw);
1950 /* evcntlsw */
1951 DO_SPE_OP1(cntlsw);
1952 /* evcntlzw */
1953 DO_SPE_OP1(cntlzw);
1954 /* evneg */
1955 DO_SPE_OP1(neg);
1956 /* evrlw */
1957 DO_SPE_OP2(rlw);
1958 /* evrnd */
1959 DO_SPE_OP1(rndw);
1960 /* evslw */
1961 DO_SPE_OP2(slw);
1962 /* evsrws */
1963 DO_SPE_OP2(srws);
1964 /* evsrwu */
1965 DO_SPE_OP2(srwu);
1966 /* evsubfw */
1967 DO_SPE_OP2(subfw);
1969 /* evsel is a little bit more complicated... */
1970 static always_inline uint32_t _do_esel (uint32_t op1, uint32_t op2, int n)
1972 if (n)
1973 return op1;
1974 else
1975 return op2;
1978 void do_evsel (void)
1980 T0_64 = ((uint64_t)_do_esel(T0_64 >> 32, T1_64 >> 32, T0 >> 3) << 32) |
1981 (uint64_t)_do_esel(T0_64, T1_64, (T0 >> 2) & 1);
1984 /* Fixed-point vector comparisons */
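/* Each vector comparison packs its two element results via _do_evcmp_merge
 * into a CR field: bit 3 = upper element, bit 2 = lower element,
 * bit 1 = OR of both, bit 0 = AND of both. */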
1985 #define DO_SPE_CMP(name) \
1986 void do_ev##name (void) \
1988 T0 = _do_evcmp_merge((uint64_t)_do_e##name(T0_64 >> 32, \
1989 T1_64 >> 32) << 32, \
1990 _do_e##name(T0_64, T1_64)); \
1993 static always_inline uint32_t _do_evcmp_merge (int t0, int t1)
1995 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
1997 static always_inline int _do_ecmpeq (uint32_t op1, uint32_t op2)
1999 return op1 == op2 ? 1 : 0;
2002 static always_inline int _do_ecmpgts (int32_t op1, int32_t op2)
2004 return op1 > op2 ? 1 : 0;
2007 static always_inline int _do_ecmpgtu (uint32_t op1, uint32_t op2)
2009 return op1 > op2 ? 1 : 0;
2012 static always_inline int _do_ecmplts (int32_t op1, int32_t op2)
2014 return op1 < op2 ? 1 : 0;
2017 static always_inline int _do_ecmpltu (uint32_t op1, uint32_t op2)
2019 return op1 < op2 ? 1 : 0;
2022 /* evcmpeq */
2023 DO_SPE_CMP(cmpeq);
2024 /* evcmpgts */
2025 DO_SPE_CMP(cmpgts);
2026 /* evcmpgtu */
2027 DO_SPE_CMP(cmpgtu);
2028 /* evcmplts */
2029 DO_SPE_CMP(cmplts);
2030 /* evcmpltu */
2031 DO_SPE_CMP(cmpltu);
2033 /* Single precision floating-point conversions from/to integer */
2034 static always_inline uint32_t _do_efscfsi (int32_t val)
2036 CPU_FloatU u;
2038 u.f = int32_to_float32(val, &env->spe_status);
2040 return u.l;
2043 static always_inline uint32_t _do_efscfui (uint32_t val)
2045 CPU_FloatU u;
2047 u.f = uint32_to_float32(val, &env->spe_status);
2049 return u.l;
2052 static always_inline int32_t _do_efsctsi (uint32_t val)
2054 CPU_FloatU u;
2056 u.l = val;
 2057     /* NaNs are not treated the way IEEE 754 specifies */
2058 if (unlikely(isnan(u.f)))
2059 return 0;
2061 return float32_to_int32(u.f, &env->spe_status);
2064 static always_inline uint32_t _do_efsctui (uint32_t val)
2066 CPU_FloatU u;
2068 u.l = val;
 2069     /* NaNs are not treated the way IEEE 754 specifies */
2070 if (unlikely(isnan(u.f)))
2071 return 0;
2073 return float32_to_uint32(u.f, &env->spe_status);
2076 static always_inline int32_t _do_efsctsiz (uint32_t val)
2078 CPU_FloatU u;
2080 u.l = val;
 2081     /* NaNs are not treated the way IEEE 754 specifies */
2082 if (unlikely(isnan(u.f)))
2083 return 0;
2085 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
2088 static always_inline uint32_t _do_efsctuiz (uint32_t val)
2090 CPU_FloatU u;
2092 u.l = val;
 2093     /* NaNs are not treated the way IEEE 754 specifies */
2094 if (unlikely(isnan(u.f)))
2095 return 0;
2097 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
2100 void do_efscfsi (void)
2102 T0_64 = _do_efscfsi(T0_64);
2105 void do_efscfui (void)
2107 T0_64 = _do_efscfui(T0_64);
2110 void do_efsctsi (void)
2112 T0_64 = _do_efsctsi(T0_64);
2115 void do_efsctui (void)
2117 T0_64 = _do_efsctui(T0_64);
2120 void do_efsctsiz (void)
2122 T0_64 = _do_efsctsiz(T0_64);
2125 void do_efsctuiz (void)
2127 T0_64 = _do_efsctuiz(T0_64);
2130 /* Single precision floating-point conversion to/from fractional */
2131 static always_inline uint32_t _do_efscfsf (uint32_t val)
2133 CPU_FloatU u;
2134 float32 tmp;
2136 u.f = int32_to_float32(val, &env->spe_status);
2137 tmp = int64_to_float32(1ULL << 32, &env->spe_status);
2138 u.f = float32_div(u.f, tmp, &env->spe_status);
2140 return u.l;
2143 static always_inline uint32_t _do_efscfuf (uint32_t val)
2145 CPU_FloatU u;
2146 float32 tmp;
2148 u.f = uint32_to_float32(val, &env->spe_status);
2149 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2150 u.f = float32_div(u.f, tmp, &env->spe_status);
2152 return u.l;
2155 static always_inline int32_t _do_efsctsf (uint32_t val)
2157 CPU_FloatU u;
2158 float32 tmp;
2160 u.l = val;
 2161     /* NaNs are not treated the way IEEE 754 specifies */
2162 if (unlikely(isnan(u.f)))
2163 return 0;
2164 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2165 u.f = float32_mul(u.f, tmp, &env->spe_status);
2167 return float32_to_int32(u.f, &env->spe_status);
2170 static always_inline uint32_t _do_efsctuf (uint32_t val)
2172 CPU_FloatU u;
2173 float32 tmp;
2175 u.l = val;
 2176     /* NaNs are not treated the way IEEE 754 specifies */
2177 if (unlikely(isnan(u.f)))
2178 return 0;
2179 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2180 u.f = float32_mul(u.f, tmp, &env->spe_status);
2182 return float32_to_uint32(u.f, &env->spe_status);
2185 static always_inline int32_t _do_efsctsfz (uint32_t val)
2187 CPU_FloatU u;
2188 float32 tmp;
2190 u.l = val;
 2191     /* NaNs are not treated the way IEEE 754 specifies */
2192 if (unlikely(isnan(u.f)))
2193 return 0;
2194 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2195 u.f = float32_mul(u.f, tmp, &env->spe_status);
2197 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
2200 static always_inline uint32_t _do_efsctufz (uint32_t val)
2202 CPU_FloatU u;
2203 float32 tmp;
2205 u.l = val;
 2206     /* NaNs are not treated the way IEEE 754 specifies */
2207 if (unlikely(isnan(u.f)))
2208 return 0;
2209 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2210 u.f = float32_mul(u.f, tmp, &env->spe_status);
2212 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
2215 void do_efscfsf (void)
2217 T0_64 = _do_efscfsf(T0_64);
2220 void do_efscfuf (void)
2222 T0_64 = _do_efscfuf(T0_64);
2225 void do_efsctsf (void)
2227 T0_64 = _do_efsctsf(T0_64);
2230 void do_efsctuf (void)
2232 T0_64 = _do_efsctuf(T0_64);
2235 void do_efsctsfz (void)
2237 T0_64 = _do_efsctsfz(T0_64);
2240 void do_efsctufz (void)
2242 T0_64 = _do_efsctufz(T0_64);
2245 /* Double precision floating point helpers */
2246 static always_inline int _do_efdcmplt (uint64_t op1, uint64_t op2)
 2248     /* XXX: TODO: test special values (NaN, infinities, ...) */
2249 return _do_efdtstlt(op1, op2);
2252 static always_inline int _do_efdcmpgt (uint64_t op1, uint64_t op2)
 2254     /* XXX: TODO: test special values (NaN, infinities, ...) */
2255 return _do_efdtstgt(op1, op2);
2258 static always_inline int _do_efdcmpeq (uint64_t op1, uint64_t op2)
 2260     /* XXX: TODO: test special values (NaN, infinities, ...) */
2261 return _do_efdtsteq(op1, op2);
2264 void do_efdcmplt (void)
2266 T0 = _do_efdcmplt(T0_64, T1_64);
2269 void do_efdcmpgt (void)
2271 T0 = _do_efdcmpgt(T0_64, T1_64);
2274 void do_efdcmpeq (void)
2276 T0 = _do_efdcmpeq(T0_64, T1_64);
2279 /* Double precision floating-point conversion to/from integer */
2280 static always_inline uint64_t _do_efdcfsi (int64_t val)
2282 CPU_DoubleU u;
2284 u.d = int64_to_float64(val, &env->spe_status);
2286 return u.ll;
2289 static always_inline uint64_t _do_efdcfui (uint64_t val)
2291 CPU_DoubleU u;
2293 u.d = uint64_to_float64(val, &env->spe_status);
2295 return u.ll;
2298 static always_inline int64_t _do_efdctsi (uint64_t val)
2300 CPU_DoubleU u;
2302 u.ll = val;
 2303     /* NaNs are not treated the way IEEE 754 specifies */
2304 if (unlikely(isnan(u.d)))
2305 return 0;
2307 return float64_to_int64(u.d, &env->spe_status);
2310 static always_inline uint64_t _do_efdctui (uint64_t val)
2312 CPU_DoubleU u;
2314 u.ll = val;
 2315     /* NaNs are not treated the way IEEE 754 specifies */
2316 if (unlikely(isnan(u.d)))
2317 return 0;
2319 return float64_to_uint64(u.d, &env->spe_status);
2322 static always_inline int64_t _do_efdctsiz (uint64_t val)
2324 CPU_DoubleU u;
2326 u.ll = val;
 2327     /* NaNs are not treated the way IEEE 754 specifies */
2328 if (unlikely(isnan(u.d)))
2329 return 0;
2331 return float64_to_int64_round_to_zero(u.d, &env->spe_status);
2334 static always_inline uint64_t _do_efdctuiz (uint64_t val)
2336 CPU_DoubleU u;
2338 u.ll = val;
 2339     /* NaNs are not treated the way IEEE 754 specifies */
2340 if (unlikely(isnan(u.d)))
2341 return 0;
2343 return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
2346 void do_efdcfsi (void)
2348 T0_64 = _do_efdcfsi(T0_64);
2351 void do_efdcfui (void)
2353 T0_64 = _do_efdcfui(T0_64);
2356 void do_efdctsi (void)
2358 T0_64 = _do_efdctsi(T0_64);
2361 void do_efdctui (void)
2363 T0_64 = _do_efdctui(T0_64);
2366 void do_efdctsiz (void)
2368 T0_64 = _do_efdctsiz(T0_64);
2371 void do_efdctuiz (void)
2373 T0_64 = _do_efdctuiz(T0_64);
2376 /* Double precision floating-point conversion to/from fractional */
2377 static always_inline uint64_t _do_efdcfsf (int64_t val)
2379 CPU_DoubleU u;
2380 float64 tmp;
2382 u.d = int32_to_float64(val, &env->spe_status);
2383 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2384 u.d = float64_div(u.d, tmp, &env->spe_status);
2386 return u.ll;
2389 static always_inline uint64_t _do_efdcfuf (uint64_t val)
2391 CPU_DoubleU u;
2392 float64 tmp;
2394 u.d = uint32_to_float64(val, &env->spe_status);
2395 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2396 u.d = float64_div(u.d, tmp, &env->spe_status);
2398 return u.ll;
2401 static always_inline int64_t _do_efdctsf (uint64_t val)
2403 CPU_DoubleU u;
2404 float64 tmp;
2406 u.ll = val;
 2407     /* NaNs are not treated the way IEEE 754 specifies */
2408 if (unlikely(isnan(u.d)))
2409 return 0;
2410 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2411 u.d = float64_mul(u.d, tmp, &env->spe_status);
2413 return float64_to_int32(u.d, &env->spe_status);
2416 static always_inline uint64_t _do_efdctuf (uint64_t val)
2418 CPU_DoubleU u;
2419 float64 tmp;
2421 u.ll = val;
2422 /* NaNs are not handled the way IEEE 754 specifies */
2423 if (unlikely(isnan(u.d)))
2424 return 0;
2425 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2426 u.d = float64_mul(u.d, tmp, &env->spe_status);
2428 return float64_to_uint32(u.d, &env->spe_status);
2431 static always_inline int64_t _do_efdctsfz (uint64_t val)
2433 CPU_DoubleU u;
2434 float64 tmp;
2436 u.ll = val;
2437 /* NaNs are not handled the way IEEE 754 specifies */
2438 if (unlikely(isnan(u.d)))
2439 return 0;
2440 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2441 u.d = float64_mul(u.d, tmp, &env->spe_status);
2443 return float64_to_int32_round_to_zero(u.d, &env->spe_status);
2446 static always_inline uint64_t _do_efdctufz (uint64_t val)
2448 CPU_DoubleU u;
2449 float64 tmp;
2451 u.ll = val;
2452 /* NaNs are not handled the way IEEE 754 specifies */
2453 if (unlikely(isnan(u.d)))
2454 return 0;
2455 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2456 u.d = float64_mul(u.d, tmp, &env->spe_status);
2458 return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
2461 void do_efdcfsf (void)
2463 T0_64 = _do_efdcfsf(T0_64);
2466 void do_efdcfuf (void)
2468 T0_64 = _do_efdcfuf(T0_64);
2471 void do_efdctsf (void)
2473 T0_64 = _do_efdctsf(T0_64);
2476 void do_efdctuf (void)
2478 T0_64 = _do_efdctuf(T0_64);
2481 void do_efdctsfz (void)
2483 T0_64 = _do_efdctsfz(T0_64);
2486 void do_efdctufz (void)
2488 T0_64 = _do_efdctufz(T0_64);
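/* The fractional formats above are scaled by 2^32: converting from a
 * fractional divides by 2^32, converting to one multiplies by 2^32 and
 * then narrows to 32 bits.  For instance the signed fractional 0x40000000
 * becomes 0x40000000 / 2^32 = 0.25.  A small round-trip sketch, kept under
 * #if 0 so it is never compiled (illustration only, not part of the
 * original helpers): */
#if 0
static void efd_fractional_example (void)
{
    uint64_t bits = _do_efdcfsf(0x40000000);  /* IEEE double 0.25 */
    int64_t frac = _do_efdctsf(bits);         /* back to 0x40000000 */

    (void)frac;
}
#endif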
2491 /* Floating point conversion between single and double precision */
2492 static always_inline uint32_t _do_efscfd (uint64_t val)
2494 CPU_DoubleU u1;
2495 CPU_FloatU u2;
2497 u1.ll = val;
2498 u2.f = float64_to_float32(u1.d, &env->spe_status);
2500 return u2.l;
2503 static always_inline uint64_t _do_efdcfs (uint32_t val)
2505 CPU_DoubleU u2;
2506 CPU_FloatU u1;
2508 u1.l = val;
2509 u2.d = float32_to_float64(u1.f, &env->spe_status);
2511 return u2.ll;
2514 void do_efscfd (void)
2516 T0_64 = _do_efscfd(T0_64);
2519 void do_efdcfs (void)
2521 T0_64 = _do_efdcfs(T0_64);
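/* The CPU_FloatU/CPU_DoubleU unions above only reinterpret the raw bits,
 * so the softfloat float32/float64 values can be moved in and out of the
 * 64-bit T registers without any implicit numeric conversion. */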
2524 /* Single precision fixed-point vector arithmetic */
2525 /* evfsabs */
2526 DO_SPE_OP1(fsabs);
2527 /* evfsnabs */
2528 DO_SPE_OP1(fsnabs);
2529 /* evfsneg */
2530 DO_SPE_OP1(fsneg);
2531 /* evfsadd */
2532 DO_SPE_OP2(fsadd);
2533 /* evfssub */
2534 DO_SPE_OP2(fssub);
2535 /* evfsmul */
2536 DO_SPE_OP2(fsmul);
2537 /* evfsdiv */
2538 DO_SPE_OP2(fsdiv);
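/* DO_SPE_OP1/DO_SPE_OP2 (defined earlier in this file) expand to helpers
 * that apply the scalar routine to both 32-bit halves of the 64-bit
 * register, which is how the ev* vector forms are implemented. */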
2540 /* Single-precision floating-point comparisons */
2541 static always_inline int _do_efscmplt (uint32_t op1, uint32_t op2)
2543 /* XXX: TODO: test special values (NaN, infinities, ...) */
2544 return _do_efststlt(op1, op2);
2547 static always_inline int _do_efscmpgt (uint32_t op1, uint32_t op2)
2549 /* XXX: TODO: test special values (NaN, infinities, ...) */
2550 return _do_efststgt(op1, op2);
2553 static always_inline int _do_efscmpeq (uint32_t op1, uint32_t op2)
2555 /* XXX: TODO: test special values (NaN, infinities, ...) */
2556 return _do_efststeq(op1, op2);
2559 void do_efscmplt (void)
2561 T0 = _do_efscmplt(T0_64, T1_64);
2564 void do_efscmpgt (void)
2566 T0 = _do_efscmpgt(T0_64, T1_64);
2569 void do_efscmpeq (void)
2571 T0 = _do_efscmpeq(T0_64, T1_64);
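/* The wrappers above pass the 64-bit T0_64/T1_64 values to helpers taking
 * uint32_t, so only the low 32 bits (the scalar single-precision operand)
 * are compared. */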
2574 /* Single-precision floating-point vector comparisons */
2575 /* evfscmplt */
2576 DO_SPE_CMP(fscmplt);
2577 /* evfscmpgt */
2578 DO_SPE_CMP(fscmpgt);
2579 /* evfscmpeq */
2580 DO_SPE_CMP(fscmpeq);
2581 /* evfststlt */
2582 DO_SPE_CMP(fststlt);
2583 /* evfststgt */
2584 DO_SPE_CMP(fststgt);
2585 /* evfststeq */
2586 DO_SPE_CMP(fststeq);
2588 /* Single-precision floating-point vector conversions */
2589 /* evfscfsi */
2590 DO_SPE_OP1(fscfsi);
2591 /* evfscfui */
2592 DO_SPE_OP1(fscfui);
2593 /* evfscfuf */
2594 DO_SPE_OP1(fscfuf);
2595 /* evfscfsf */
2596 DO_SPE_OP1(fscfsf);
2597 /* evfsctsi */
2598 DO_SPE_OP1(fsctsi);
2599 /* evfsctui */
2600 DO_SPE_OP1(fsctui);
2601 /* evfsctsiz */
2602 DO_SPE_OP1(fsctsiz);
2603 /* evfsctuiz */
2604 DO_SPE_OP1(fsctuiz);
2605 /* evfsctsf */
2606 DO_SPE_OP1(fsctsf);
2607 /* evfsctuf */
2608 DO_SPE_OP1(fsctuf);
2610 /*****************************************************************************/
2611 /* Softmmu support */
2612 #if !defined (CONFIG_USER_ONLY)
2614 #define MMUSUFFIX _mmu
2615 #ifdef __s390__
2616 # define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
2617 #else
2618 # define GETPC() (__builtin_return_address(0))
2619 #endif
2621 #define SHIFT 0
2622 #include "softmmu_template.h"
2624 #define SHIFT 1
2625 #include "softmmu_template.h"
2627 #define SHIFT 2
2628 #include "softmmu_template.h"
2630 #define SHIFT 3
2631 #include "softmmu_template.h"
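/* Each SHIFT value instantiates softmmu_template.h for a (1 << SHIFT)-byte
 * access, generating the slow-path load/store helpers for 1, 2, 4 and
 * 8 byte accesses used by the software MMU. */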
2633 /* Try to fill the TLB and raise an exception on error. If retaddr is
2634 NULL, the function was called from C code (i.e. not from generated
2635 code or from helper.c). */
2636 /* XXX: fix it to restore all registers */
2637 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2639 TranslationBlock *tb;
2640 CPUState *saved_env;
2641 unsigned long pc;
2642 int ret;
2644 /* XXX: hack to restore env in all cases, even if not called from
2645 generated code */
2646 saved_env = env;
2647 env = cpu_single_env;
2648 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
2649 if (unlikely(ret != 0)) {
2650 if (likely(retaddr)) {
2651 /* now we have a real cpu fault */
2652 pc = (unsigned long)retaddr;
2653 tb = tb_find_pc(pc);
2654 if (likely(tb)) {
2655 /* the PC is inside the translated code. It means that we have
2656 a virtual CPU fault */
2657 cpu_restore_state(tb, env, pc, NULL);
2660 do_raise_exception_err(env->exception_index, env->error_code);
2662 env = saved_env;
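/* When the refill fails and we came from generated code, cpu_restore_state()
 * rewinds the CPU state to the faulting instruction before the MMU
 * exception is raised; when called from C (retaddr == NULL) the exception
 * is raised directly. */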
2665 /* Software driven TLBs management */
2666 /* PowerPC 602/603 software TLB load instructions helpers */
2667 void do_load_6xx_tlb (int is_code)
2669 target_ulong RPN, CMP, EPN;
2670 int way;
2672 RPN = env->spr[SPR_RPA];
2673 if (is_code) {
2674 CMP = env->spr[SPR_ICMP];
2675 EPN = env->spr[SPR_IMISS];
2676 } else {
2677 CMP = env->spr[SPR_DCMP];
2678 EPN = env->spr[SPR_DMISS];
2680 way = (env->spr[SPR_SRR1] >> 17) & 1;
2681 #if defined (DEBUG_SOFTWARE_TLB)
2682 if (loglevel != 0) {
2683 fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
2684 " PTE1 " ADDRX " way %d\n",
2685 __func__, T0, EPN, CMP, RPN, way);
2687 #endif
2688 /* Store this TLB */
2689 ppc6xx_tlb_store(env, (uint32_t)(T0 & TARGET_PAGE_MASK),
2690 way, is_code, CMP, RPN);
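/* This helper backs the 602/603 tlbld/tlbli software TLB reload
 * instructions: the compare value and miss address come from DCMP/DMISS
 * (or ICMP/IMISS for code), the PTE from RPA, and the way from SRR1
 * bit 17, as selected above. */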
2693 void do_load_74xx_tlb (int is_code)
2695 target_ulong RPN, CMP, EPN;
2696 int way;
2698 RPN = env->spr[SPR_PTELO];
2699 CMP = env->spr[SPR_PTEHI];
2700 EPN = env->spr[SPR_TLBMISS] & ~0x3;
2701 way = env->spr[SPR_TLBMISS] & 0x3;
2702 #if defined (DEBUG_SOFTWARE_TLB)
2703 if (loglevel != 0) {
2704 fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
2705 " PTE1 " ADDRX " way %d\n",
2706 __func__, T0, EPN, CMP, RPN, way);
2708 #endif
2709 /* Store this TLB */
2710 ppc6xx_tlb_store(env, (uint32_t)(T0 & TARGET_PAGE_MASK),
2711 way, is_code, CMP, RPN);
2714 static always_inline target_ulong booke_tlb_to_page_size (int size)
2716 return 1024 << (2 * size);
2719 static always_inline int booke_page_size_to_tlb (target_ulong page_size)
2721 int size;
2723 switch (page_size) {
2724 case 0x00000400UL:
2725 size = 0x0;
2726 break;
2727 case 0x00001000UL:
2728 size = 0x1;
2729 break;
2730 case 0x00004000UL:
2731 size = 0x2;
2732 break;
2733 case 0x00010000UL:
2734 size = 0x3;
2735 break;
2736 case 0x00040000UL:
2737 size = 0x4;
2738 break;
2739 case 0x00100000UL:
2740 size = 0x5;
2741 break;
2742 case 0x00400000UL:
2743 size = 0x6;
2744 break;
2745 case 0x01000000UL:
2746 size = 0x7;
2747 break;
2748 case 0x04000000UL:
2749 size = 0x8;
2750 break;
2751 case 0x10000000UL:
2752 size = 0x9;
2753 break;
2754 case 0x40000000UL:
2755 size = 0xA;
2756 break;
2757 #if defined (TARGET_PPC64)
2758 case 0x000100000000ULL:
2759 size = 0xB;
2760 break;
2761 case 0x000400000000ULL:
2762 size = 0xC;
2763 break;
2764 case 0x001000000000ULL:
2765 size = 0xD;
2766 break;
2767 case 0x004000000000ULL:
2768 size = 0xE;
2769 break;
2770 case 0x010000000000ULL:
2771 size = 0xF;
2772 break;
2773 #endif
2774 default:
2775 size = -1;
2776 break;
2779 return size;
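/* booke_page_size_to_tlb() is the inverse of booke_tlb_to_page_size(),
 * i.e. it recovers n from page_size == 1024 << (2 * n).  A closed-form
 * sketch of the same mapping, assuming ctz64() from host-utils.h and
 * without the table's upper bound, kept under #if 0 (illustration only): */
#if 0
static always_inline int booke_page_size_to_tlb_alt (target_ulong page_size)
{
    int shift;

    /* Valid sizes are powers of four times 1 KiB */
    if (page_size < 0x400 || (page_size & (page_size - 1)) != 0)
        return -1;
    shift = ctz64(page_size) - 10;      /* 1 KiB maps to 0 */
    if (shift & 1)
        return -1;
    return shift >> 1;
}
#endif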
2782 /* Helpers for 4xx TLB management */
2783 void do_4xx_tlbre_lo (void)
2785 ppcemb_tlb_t *tlb;
2786 int size;
2788 T0 &= 0x3F;
2789 tlb = &env->tlb[T0].tlbe;
2790 T0 = tlb->EPN;
2791 if (tlb->prot & PAGE_VALID)
2792 T0 |= 0x400;
2793 size = booke_page_size_to_tlb(tlb->size);
2794 if (size < 0 || size > 0x7)
2795 size = 1;
2796 T0 |= size << 7;
2797 env->spr[SPR_40x_PID] = tlb->PID;
2800 void do_4xx_tlbre_hi (void)
2802 ppcemb_tlb_t *tlb;
2804 T0 &= 0x3F;
2805 tlb = &env->tlb[T0].tlbe;
2806 T0 = tlb->RPN;
2807 if (tlb->prot & PAGE_EXEC)
2808 T0 |= 0x200;
2809 if (tlb->prot & PAGE_WRITE)
2810 T0 |= 0x100;
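/* As read back above, the "lo" word is EPN with the valid bit at 0x400 and
 * the size field in bits 7..9 (and it refreshes SPR 40x_PID), while the
 * "hi" word is RPN with the execute and write permissions at 0x200 and
 * 0x100. */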
2813 void do_4xx_tlbwe_hi (void)
2815 ppcemb_tlb_t *tlb;
2816 target_ulong page, end;
2818 #if defined (DEBUG_SOFTWARE_TLB)
2819 if (loglevel != 0) {
2820 fprintf(logfile, "%s T0 " TDX " T1 " TDX "\n", __func__, T0, T1);
2822 #endif
2823 T0 &= 0x3F;
2824 tlb = &env->tlb[T0].tlbe;
2825 /* Invalidate previous TLB (if it's valid) */
2826 if (tlb->prot & PAGE_VALID) {
2827 end = tlb->EPN + tlb->size;
2828 #if defined (DEBUG_SOFTWARE_TLB)
2829 if (loglevel != 0) {
2830 fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
2831 " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
2833 #endif
2834 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2835 tlb_flush_page(env, page);
2837 tlb->size = booke_tlb_to_page_size((T1 >> 7) & 0x7);
2838 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
2839 * If this ever occurs, one should use the ppcemb target instead
2840 * of the ppc or ppc64 one */
2842 if ((T1 & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
2843 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
2844 "are not supported (%d)\n",
2845 tlb->size, TARGET_PAGE_SIZE, (int)((T1 >> 7) & 0x7));
2847 tlb->EPN = T1 & ~(tlb->size - 1);
2848 if (T1 & 0x40)
2849 tlb->prot |= PAGE_VALID;
2850 else
2851 tlb->prot &= ~PAGE_VALID;
2852 if (T1 & 0x20) {
2853 /* XXX: TO BE FIXED */
2854 cpu_abort(env, "Little-endian TLB entries are not supported for now\n");
2856 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
2857 tlb->attr = T1 & 0xFF;
2858 #if defined (DEBUG_SOFTWARE_TLB)
2859 if (loglevel != 0) {
2860 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2861 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2862 (int)T0, tlb->RPN, tlb->EPN, tlb->size,
2863 tlb->prot & PAGE_READ ? 'r' : '-',
2864 tlb->prot & PAGE_WRITE ? 'w' : '-',
2865 tlb->prot & PAGE_EXEC ? 'x' : '-',
2866 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2868 #endif
2869 /* Invalidate new TLB (if valid) */
2870 if (tlb->prot & PAGE_VALID) {
2871 end = tlb->EPN + tlb->size;
2872 #if defined (DEBUG_SOFTWARE_TLB)
2873 if (loglevel != 0) {
2874 fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
2875 " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
2877 #endif
2878 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2879 tlb_flush_page(env, page);
2883 void do_4xx_tlbwe_lo (void)
2885 ppcemb_tlb_t *tlb;
2887 #if defined (DEBUG_SOFTWARE_TLB)
2888 if (loglevel != 0) {
2889 fprintf(logfile, "%s T0 " TDX " T1 " TDX "\n", __func__, T0, T1);
2891 #endif
2892 T0 &= 0x3F;
2893 tlb = &env->tlb[T0].tlbe;
2894 tlb->RPN = T1 & 0xFFFFFC00;
2895 tlb->prot = PAGE_READ;
2896 if (T1 & 0x200)
2897 tlb->prot |= PAGE_EXEC;
2898 if (T1 & 0x100)
2899 tlb->prot |= PAGE_WRITE;
2900 #if defined (DEBUG_SOFTWARE_TLB)
2901 if (loglevel != 0) {
2902 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2903 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2904 (int)T0, tlb->RPN, tlb->EPN, tlb->size,
2905 tlb->prot & PAGE_READ ? 'r' : '-',
2906 tlb->prot & PAGE_WRITE ? 'w' : '-',
2907 tlb->prot & PAGE_EXEC ? 'x' : '-',
2908 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2910 #endif
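/* The write path mirrors the read-back above: the "hi" word sets EPN, size
 * and the valid bit (flushing any pages the entry covered before and after
 * the update), the "lo" word sets RPN plus the execute/write bits, and
 * PAGE_READ is always granted. */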
2913 /* PowerPC 440 TLB management */
2914 void do_440_tlbwe (int word)
2916 ppcemb_tlb_t *tlb;
2917 target_ulong EPN, RPN, size;
2918 int do_flush_tlbs;
2920 #if defined (DEBUG_SOFTWARE_TLB)
2921 if (loglevel != 0) {
2922 fprintf(logfile, "%s word %d T0 " TDX " T1 " TDX "\n",
2923 __func__, word, T0, T1);
2925 #endif
2926 do_flush_tlbs = 0;
2927 T0 &= 0x3F;
2928 tlb = &env->tlb[T0].tlbe;
2929 switch (word) {
2930 default:
2931 /* Just here to please gcc */
2932 case 0:
2933 EPN = T1 & 0xFFFFFC00;
2934 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
2935 do_flush_tlbs = 1;
2936 tlb->EPN = EPN;
2937 size = booke_tlb_to_page_size((T1 >> 4) & 0xF);
2938 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
2939 do_flush_tlbs = 1;
2940 tlb->size = size;
2941 tlb->attr &= ~0x1;
2942 tlb->attr |= (T1 >> 8) & 1;
2943 if (T1 & 0x200) {
2944 tlb->prot |= PAGE_VALID;
2945 } else {
2946 if (tlb->prot & PAGE_VALID) {
2947 tlb->prot &= ~PAGE_VALID;
2948 do_flush_tlbs = 1;
2951 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
2952 if (do_flush_tlbs)
2953 tlb_flush(env, 1);
2954 break;
2955 case 1:
2956 RPN = T1 & 0xFFFFFC0F;
2957 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
2958 tlb_flush(env, 1);
2959 tlb->RPN = RPN;
2960 break;
2961 case 2:
2962 tlb->attr = (tlb->attr & 0x1) | (T1 & 0x0000FF00);
2963 tlb->prot = tlb->prot & PAGE_VALID;
2964 if (T1 & 0x1)
2965 tlb->prot |= PAGE_READ << 4;
2966 if (T1 & 0x2)
2967 tlb->prot |= PAGE_WRITE << 4;
2968 if (T1 & 0x4)
2969 tlb->prot |= PAGE_EXEC << 4;
2970 if (T1 & 0x8)
2971 tlb->prot |= PAGE_READ;
2972 if (T1 & 0x10)
2973 tlb->prot |= PAGE_WRITE;
2974 if (T1 & 0x20)
2975 tlb->prot |= PAGE_EXEC;
2976 break;
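/* 440 TLB word layout as handled above: word 0 carries EPN, the size field
 * and the valid bit, word 1 the RPN, and word 2 the attribute bits plus the
 * two permission sets, one kept in the low PAGE_* bits of prot and the
 * other stored shifted left by 4; do_440_tlbre below unpacks the same
 * encoding. */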
2980 void do_440_tlbre (int word)
2982 ppcemb_tlb_t *tlb;
2983 int size;
2985 T0 &= 0x3F;
2986 tlb = &env->tlb[T0].tlbe;
2987 switch (word) {
2988 default:
2989 /* Just here to please gcc */
2990 case 0:
2991 T0 = tlb->EPN;
2992 size = booke_page_size_to_tlb(tlb->size);
2993 if (size < 0 || size > 0xF)
2994 size = 1;
2995 T0 |= size << 4;
2996 if (tlb->attr & 0x1)
2997 T0 |= 0x100;
2998 if (tlb->prot & PAGE_VALID)
2999 T0 |= 0x200;
3000 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
3001 env->spr[SPR_440_MMUCR] |= tlb->PID;
3002 break;
3003 case 1:
3004 T0 = tlb->RPN;
3005 break;
3006 case 2:
3007 T0 = tlb->attr & ~0x1;
3008 if (tlb->prot & (PAGE_READ << 4))
3009 T0 |= 0x1;
3010 if (tlb->prot & (PAGE_WRITE << 4))
3011 T0 |= 0x2;
3012 if (tlb->prot & (PAGE_EXEC << 4))
3013 T0 |= 0x4;
3014 if (tlb->prot & PAGE_READ)
3015 T0 |= 0x8;
3016 if (tlb->prot & PAGE_WRITE)
3017 T0 |= 0x10;
3018 if (tlb->prot & PAGE_EXEC)
3019 T0 |= 0x20;
3020 break;
3023 #endif /* !CONFIG_USER_ONLY */