[qemu/ar7.git] / target-s390x / op_helper.c
1 /*
2 * S/390 helper routines
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "cpu.h"
22 #include "dyngen-exec.h"
23 #include "host-utils.h"
24 #include "helpers.h"
25 #include <string.h>
26 #include "kvm.h"
27 #include "qemu-timer.h"
28 #ifdef CONFIG_KVM
29 #include <linux/kvm.h>
30 #endif
32 #if !defined (CONFIG_USER_ONLY)
33 #include "sysemu.h"
34 #endif
36 /*****************************************************************************/
37 /* Softmmu support */
38 #if !defined (CONFIG_USER_ONLY)
39 #include "softmmu_exec.h"
41 #define MMUSUFFIX _mmu
43 #define SHIFT 0
44 #include "softmmu_template.h"
46 #define SHIFT 1
47 #include "softmmu_template.h"
49 #define SHIFT 2
50 #include "softmmu_template.h"
52 #define SHIFT 3
53 #include "softmmu_template.h"
55 /* Try to fill the TLB and raise an exception on error. If retaddr is
56    NULL, the function was called from C code (i.e. not from generated
57    code or from helper.c). */
58 /* XXX: fix it to restore all registers */
59 void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
60 void *retaddr)
62 TranslationBlock *tb;
63 CPUState *saved_env;
64 unsigned long pc;
65 int ret;
67 saved_env = env;
68 env = env1;
69 ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx);
70 if (unlikely(ret != 0)) {
71 if (likely(retaddr)) {
72 /* now we have a real cpu fault */
73 pc = (unsigned long)retaddr;
74 tb = tb_find_pc(pc);
75 if (likely(tb)) {
76 /* the PC is inside the translated code. It means that we have
77 a virtual CPU fault */
78 cpu_restore_state(tb, env, pc);
81 cpu_loop_exit(env);
83 env = saved_env;
86 #endif
88 /* #define DEBUG_HELPER */
89 #ifdef DEBUG_HELPER
90 #define HELPER_LOG(x...) qemu_log(x)
91 #else
92 #define HELPER_LOG(x...)
93 #endif
95 /* raise an exception */
96 void HELPER(exception)(uint32_t excp)
98 HELPER_LOG("%s: exception %d\n", __FUNCTION__, excp);
99 env->exception_index = excp;
100 cpu_loop_exit(env);
103 #ifndef CONFIG_USER_ONLY
104 static void mvc_fast_memset(CPUState *env, uint32_t l, uint64_t dest,
105 uint8_t byte)
107 target_phys_addr_t dest_phys;
108 target_phys_addr_t len = l;
109 void *dest_p;
110 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
111 int flags;
113 if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
114 stb(dest, byte);
115 cpu_abort(env, "should never reach here");
117 dest_phys |= dest & ~TARGET_PAGE_MASK;
119 dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
121 memset(dest_p, byte, len);
123 cpu_physical_memory_unmap(dest_p, 1, len, len);
126 static void mvc_fast_memmove(CPUState *env, uint32_t l, uint64_t dest,
127 uint64_t src)
129 target_phys_addr_t dest_phys;
130 target_phys_addr_t src_phys;
131 target_phys_addr_t len = l;
132 void *dest_p;
133 void *src_p;
134 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
135 int flags;
137 if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
138 stb(dest, 0);
139 cpu_abort(env, "should never reach here");
141 dest_phys |= dest & ~TARGET_PAGE_MASK;
143 if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
144 ldub(src);
145 cpu_abort(env, "should never reach here");
147 src_phys |= src & ~TARGET_PAGE_MASK;
149 dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
150 src_p = cpu_physical_memory_map(src_phys, &len, 0);
152 memmove(dest_p, src_p, len);
154 cpu_physical_memory_unmap(dest_p, 1, len, len);
155 cpu_physical_memory_unmap(src_p, 0, len, len);
157 #endif
159 /* AND on array */
160 uint32_t HELPER(nc)(uint32_t l, uint64_t dest, uint64_t src)
162 int i;
163 unsigned char x;
164 uint32_t cc = 0;
166 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
167 __FUNCTION__, l, dest, src);
168 for (i = 0; i <= l; i++) {
169 x = ldub(dest + i) & ldub(src + i);
170 if (x) {
171 cc = 1;
173 stb(dest + i, x);
175 return cc;
178 /* XOR on array */
179 uint32_t HELPER(xc)(uint32_t l, uint64_t dest, uint64_t src)
181 int i;
182 unsigned char x;
183 uint32_t cc = 0;
185 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
186 __FUNCTION__, l, dest, src);
188 #ifndef CONFIG_USER_ONLY
189 /* xor with itself is the same as memset(0) */
190 if ((l > 32) && (src == dest) &&
191 (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
192 mvc_fast_memset(env, l + 1, dest, 0);
193 return 0;
195 #else
196 if (src == dest) {
197 memset(g2h(dest), 0, l + 1);
198 return 0;
200 #endif
202 for (i = 0; i <= l; i++) {
203 x = ldub(dest + i) ^ ldub(src + i);
204 if (x) {
205 cc = 1;
207 stb(dest + i, x);
209 return cc;
212 /* OR on array */
213 uint32_t HELPER(oc)(uint32_t l, uint64_t dest, uint64_t src)
215 int i;
216 unsigned char x;
217 uint32_t cc = 0;
219 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
220 __FUNCTION__, l, dest, src);
221 for (i = 0; i <= l; i++) {
222 x = ldub(dest + i) | ldub(src + i);
223 if (x) {
224 cc = 1;
226 stb(dest + i, x);
228 return cc;
231 /* memmove */
232 void HELPER(mvc)(uint32_t l, uint64_t dest, uint64_t src)
234 int i = 0;
235 int x = 0;
236 uint32_t l_64 = (l + 1) / 8;
238 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
239 __FUNCTION__, l, dest, src);
241 #ifndef CONFIG_USER_ONLY
242 if ((l > 32) &&
243 (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
244 (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
245 if (dest == (src + 1)) {
246 mvc_fast_memset(env, l + 1, dest, ldub(src));
247 return;
248 } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
249 mvc_fast_memmove(env, l + 1, dest, src);
250 return;
253 #else
254 if (dest == (src + 1)) {
255 memset(g2h(dest), ldub(src), l + 1);
256 return;
257 } else {
258 memmove(g2h(dest), g2h(src), l + 1);
259 return;
261 #endif
263 /* handle the parts that fit into 8-byte loads/stores */
264 if (dest != (src + 1)) {
265 for (i = 0; i < l_64; i++) {
266 stq(dest + x, ldq(src + x));
267 x += 8;
271 /* slow version crossing pages with byte accesses */
272 for (i = x; i <= l; i++) {
273 stb(dest + i, ldub(src + i));
277 /* compare unsigned byte arrays */
278 uint32_t HELPER(clc)(uint32_t l, uint64_t s1, uint64_t s2)
280 int i;
281 unsigned char x,y;
282 uint32_t cc;
283 HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
284 __FUNCTION__, l, s1, s2);
285 for (i = 0; i <= l; i++) {
286 x = ldub(s1 + i);
287 y = ldub(s2 + i);
288 HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
289 if (x < y) {
290 cc = 1;
291 goto done;
292 } else if (x > y) {
293 cc = 2;
294 goto done;
297 cc = 0;
298 done:
299 HELPER_LOG("\n");
300 return cc;
303 /* compare logical under mask */
304 uint32_t HELPER(clm)(uint32_t r1, uint32_t mask, uint64_t addr)
306 uint8_t r,d;
307 uint32_t cc;
308 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __FUNCTION__, r1,
309 mask, addr);
310 cc = 0;
311 while (mask) {
312 if (mask & 8) {
313 d = ldub(addr);
314 r = (r1 & 0xff000000UL) >> 24;
315 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
316 addr);
317 if (r < d) {
318 cc = 1;
319 break;
320 } else if (r > d) {
321 cc = 2;
322 break;
324 addr++;
326 mask = (mask << 1) & 0xf;
327 r1 <<= 8;
329 HELPER_LOG("\n");
330 return cc;
333 /* store character under mask */
334 void HELPER(stcm)(uint32_t r1, uint32_t mask, uint64_t addr)
336 uint8_t r;
337 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__, r1, mask,
338 addr);
339 while (mask) {
340 if (mask & 8) {
341 r = (r1 & 0xff000000UL) >> 24;
342 stb(addr, r);
343 HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask, r, addr);
344 addr++;
346 mask = (mask << 1) & 0xf;
347 r1 <<= 8;
349 HELPER_LOG("\n");
352 /* 64/64 -> 128 unsigned multiplication */
353 void HELPER(mlg)(uint32_t r1, uint64_t v2)
355 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
356 /* assuming 64-bit hosts have __uint128_t */
357 __uint128_t res = (__uint128_t)env->regs[r1 + 1];
358 res *= (__uint128_t)v2;
359 env->regs[r1] = (uint64_t)(res >> 64);
360 env->regs[r1 + 1] = (uint64_t)res;
361 #else
362 mulu64(&env->regs[r1 + 1], &env->regs[r1], env->regs[r1 + 1], v2);
363 #endif
366 /* 128 -> 64/64 unsigned division */
367 void HELPER(dlg)(uint32_t r1, uint64_t v2)
369 uint64_t divisor = v2;
371 if (!env->regs[r1]) {
372 /* 64 -> 64/64 case */
373 env->regs[r1] = env->regs[r1+1] % divisor;
374 env->regs[r1+1] = env->regs[r1+1] / divisor;
375 return;
376 } else {
378 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
379 /* assuming 64-bit hosts have __uint128_t */
380 __uint128_t dividend = (((__uint128_t)env->regs[r1]) << 64) |
381 (env->regs[r1+1]);
382 __uint128_t quotient = dividend / divisor;
383 env->regs[r1+1] = quotient;
384 __uint128_t remainder = dividend % divisor;
385 env->regs[r1] = remainder;
386 #else
387 /* 32-bit hosts would need special wrapper functionality - just abort if
388 we encounter such a case; it's very unlikely anyway. */
389 cpu_abort(env, "128 -> 64/64 division not implemented\n");
390 #endif
394 static inline uint64_t get_address(int x2, int b2, int d2)
396 uint64_t r = d2;
398 if (x2) {
399 r += env->regs[x2];
402 if (b2) {
403 r += env->regs[b2];
406 /* 31-Bit mode */
407 if (!(env->psw.mask & PSW_MASK_64)) {
408 r &= 0x7fffffff;
411 return r;
414 static inline uint64_t get_address_31fix(int reg)
416 uint64_t r = env->regs[reg];
418 /* 31-Bit mode */
419 if (!(env->psw.mask & PSW_MASK_64)) {
420 r &= 0x7fffffff;
423 return r;
426 /* search string (c is the byte to search for, r2 the string start, r1 the string end) */
427 uint32_t HELPER(srst)(uint32_t c, uint32_t r1, uint32_t r2)
429 uint64_t i;
430 uint32_t cc = 2;
431 uint64_t str = get_address_31fix(r2);
432 uint64_t end = get_address_31fix(r1);
434 HELPER_LOG("%s: c %d *r1 0x%" PRIx64 " *r2 0x%" PRIx64 "\n", __FUNCTION__,
435 c, env->regs[r1], env->regs[r2]);
437 for (i = str; i != end; i++) {
438 if (ldub(i) == c) {
439 env->regs[r1] = i;
440 cc = 1;
441 break;
445 return cc;
448 /* unsigned string compare (c is string terminator) */
449 uint32_t HELPER(clst)(uint32_t c, uint32_t r1, uint32_t r2)
451 uint64_t s1 = get_address_31fix(r1);
452 uint64_t s2 = get_address_31fix(r2);
453 uint8_t v1, v2;
454 uint32_t cc;
455 c = c & 0xff;
456 #ifdef CONFIG_USER_ONLY
457 if (!c) {
458 HELPER_LOG("%s: comparing '%s' and '%s'\n",
459 __FUNCTION__, (char*)g2h(s1), (char*)g2h(s2));
461 #endif
462 for (;;) {
463 v1 = ldub(s1);
464 v2 = ldub(s2);
465 if ((v1 == c || v2 == c) || (v1 != v2)) {
466 break;
468 s1++;
469 s2++;
472 if (v1 == v2) {
473 cc = 0;
474 } else {
475 cc = (v1 < v2) ? 1 : 2;
476 /* FIXME: 31-bit mode! */
477 env->regs[r1] = s1;
478 env->regs[r2] = s2;
480 return cc;
483 /* move page */
484 void HELPER(mvpg)(uint64_t r0, uint64_t r1, uint64_t r2)
486 /* XXX missing r0 handling */
487 #ifdef CONFIG_USER_ONLY
488 int i;
490 for (i = 0; i < TARGET_PAGE_SIZE; i++) {
491 stb(r1 + i, ldub(r2 + i));
493 #else
494 mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
495 #endif
498 /* string copy (c is string terminator) */
499 void HELPER(mvst)(uint32_t c, uint32_t r1, uint32_t r2)
501 uint64_t dest = get_address_31fix(r1);
502 uint64_t src = get_address_31fix(r2);
503 uint8_t v;
504 c = c & 0xff;
505 #ifdef CONFIG_USER_ONLY
506 if (!c) {
507 HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__, (char*)g2h(src),
508 dest);
510 #endif
511 for (;;) {
512 v = ldub(src);
513 stb(dest, v);
514 if (v == c) {
515 break;
517 src++;
518 dest++;
520 env->regs[r1] = dest; /* FIXME: 31-bit mode! */
523 /* compare and swap 64-bit */
524 uint32_t HELPER(csg)(uint32_t r1, uint64_t a2, uint32_t r3)
526 /* FIXME: locking? */
527 uint32_t cc;
528 uint64_t v2 = ldq(a2);
529 if (env->regs[r1] == v2) {
530 cc = 0;
531 stq(a2, env->regs[r3]);
532 } else {
533 cc = 1;
534 env->regs[r1] = v2;
536 return cc;
539 /* compare double and swap 64-bit */
540 uint32_t HELPER(cdsg)(uint32_t r1, uint64_t a2, uint32_t r3)
542 /* FIXME: locking? */
543 uint32_t cc;
544 uint64_t v2_hi = ldq(a2);
545 uint64_t v2_lo = ldq(a2 + 8);
546 uint64_t v1_hi = env->regs[r1];
547 uint64_t v1_lo = env->regs[r1 + 1];
549 if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) {
550 cc = 0;
551 stq(a2, env->regs[r3]);
552 stq(a2 + 8, env->regs[r3 + 1]);
553 } else {
554 cc = 1;
555 env->regs[r1] = v2_hi;
556 env->regs[r1 + 1] = v2_lo;
559 return cc;
562 /* compare and swap 32-bit */
563 uint32_t HELPER(cs)(uint32_t r1, uint64_t a2, uint32_t r3)
565 /* FIXME: locking? */
566 uint32_t cc;
567 HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__, r1, a2, r3);
568 uint32_t v2 = ldl(a2);
569 if (((uint32_t)env->regs[r1]) == v2) {
570 cc = 0;
571 stl(a2, (uint32_t)env->regs[r3]);
572 } else {
573 cc = 1;
574 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | v2;
576 return cc;
579 static uint32_t helper_icm(uint32_t r1, uint64_t address, uint32_t mask)
581 int pos = 24; /* top of the lower half of r1 */
582 uint64_t rmask = 0xff000000ULL;
583 uint8_t val = 0;
584 int ccd = 0;
585 uint32_t cc = 0;
587 while (mask) {
588 if (mask & 8) {
589 env->regs[r1] &= ~rmask;
590 val = ldub(address);
591 if ((val & 0x80) && !ccd) {
592 cc = 1;
594 ccd = 1;
595 if (val && cc == 0) {
596 cc = 2;
598 env->regs[r1] |= (uint64_t)val << pos;
599 address++;
601 mask = (mask << 1) & 0xf;
602 pos -= 8;
603 rmask >>= 8;
606 return cc;
609 /* execute instruction:
610    this instruction executes an insn modified by the contents of r1.
611    It does not change the executed instruction in memory and it does not
612    change the program counter - in other words: tricky.
613    Currently implemented by interpreting the cases it is most commonly
614    used in. */
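/* Example (illustrative): if the EX target is an MVC with a zero length
   field, e.g. MVC 0(1,b1),0(b2), the length comes from the low byte of r1,
   so r1 = 7 makes the executed MVC copy 8 bytes. */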
616 uint32_t HELPER(ex)(uint32_t cc, uint64_t v1, uint64_t addr, uint64_t ret)
618 uint16_t insn = lduw_code(addr);
619 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__, v1, addr,
620 insn);
621 if ((insn & 0xf0ff) == 0xd000) {
622 uint32_t l, insn2, b1, b2, d1, d2;
623 l = v1 & 0xff;
624 insn2 = ldl_code(addr + 2);
625 b1 = (insn2 >> 28) & 0xf;
626 b2 = (insn2 >> 12) & 0xf;
627 d1 = (insn2 >> 16) & 0xfff;
628 d2 = insn2 & 0xfff;
629 switch (insn & 0xf00) {
630 case 0x200:
631 helper_mvc(l, get_address(0, b1, d1), get_address(0, b2, d2));
632 break;
633 case 0x500:
634 cc = helper_clc(l, get_address(0, b1, d1), get_address(0, b2, d2));
635 break;
636 case 0x700:
637 cc = helper_xc(l, get_address(0, b1, d1), get_address(0, b2, d2));
638 break;
639 case 0xc00:
640 helper_tr(l, get_address(0, b1, d1), get_address(0, b2, d2));
641 break;
642 default:
643 goto abort;
644 break;
646 } else if ((insn & 0xff00) == 0x0a00) {
647 /* supervisor call */
648 HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__, (insn|v1) & 0xff);
649 env->psw.addr = ret - 4;
650 env->int_svc_code = (insn|v1) & 0xff;
651 env->int_svc_ilc = 4;
652 helper_exception(EXCP_SVC);
653 } else if ((insn & 0xff00) == 0xbf00) {
654 uint32_t insn2, r1, r3, b2, d2;
655 insn2 = ldl_code(addr + 2);
656 r1 = (insn2 >> 20) & 0xf;
657 r3 = (insn2 >> 16) & 0xf;
658 b2 = (insn2 >> 12) & 0xf;
659 d2 = insn2 & 0xfff;
660 cc = helper_icm(r1, get_address(0, b2, d2), r3);
661 } else {
662 abort:
663 cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
664 insn);
666 return cc;
669 /* absolute value 32-bit */
670 uint32_t HELPER(abs_i32)(int32_t val)
672 if (val < 0) {
673 return -val;
674 } else {
675 return val;
679 /* negative absolute value 32-bit */
680 int32_t HELPER(nabs_i32)(int32_t val)
682 if (val < 0) {
683 return val;
684 } else {
685 return -val;
689 /* absolute value 64-bit */
690 uint64_t HELPER(abs_i64)(int64_t val)
692 HELPER_LOG("%s: val 0x%" PRIx64 "\n", __FUNCTION__, val);
694 if (val < 0) {
695 return -val;
696 } else {
697 return val;
701 /* negative absolute value 64-bit */
702 int64_t HELPER(nabs_i64)(int64_t val)
704 if (val < 0) {
705 return val;
706 } else {
707 return -val;
711 /* add with carry 32-bit unsigned */
712 uint32_t HELPER(addc_u32)(uint32_t cc, uint32_t v1, uint32_t v2)
714 uint32_t res;
716 res = v1 + v2;
717 if (cc & 2) {
718 res++;
721 return res;
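/* Note (illustrative): for the logical-add condition codes, carry out is
   encoded in bit 1 of the cc (values 2 and 3), which is why the cc & 2 test
   above supplies the carry in. */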
724 /* store character under mask high; same as stcm, but operates on the upper half of r1 */
725 void HELPER(stcmh)(uint32_t r1, uint64_t address, uint32_t mask)
727 int pos = 56; /* top of the upper half of r1 */
729 while (mask) {
730 if (mask & 8) {
731 stb(address, (env->regs[r1] >> pos) & 0xff);
732 address++;
734 mask = (mask << 1) & 0xf;
735 pos -= 8;
739 /* insert character under mask high; same as icm, but operates on the
740 upper half of r1 */
741 uint32_t HELPER(icmh)(uint32_t r1, uint64_t address, uint32_t mask)
743 int pos = 56; /* top of the upper half of r1 */
744 uint64_t rmask = 0xff00000000000000ULL;
745 uint8_t val = 0;
746 int ccd = 0;
747 uint32_t cc = 0;
749 while (mask) {
750 if (mask & 8) {
751 env->regs[r1] &= ~rmask;
752 val = ldub(address);
753 if ((val & 0x80) && !ccd) {
754 cc = 1;
756 ccd = 1;
757 if (val && cc == 0) {
758 cc = 2;
760 env->regs[r1] |= (uint64_t)val << pos;
761 address++;
763 mask = (mask << 1) & 0xf;
764 pos -= 8;
765 rmask >>= 8;
768 return cc;
771 /* insert psw mask and condition code into r1 */
772 void HELPER(ipm)(uint32_t cc, uint32_t r1)
774 uint64_t r = env->regs[r1];
776 r &= 0xffffffff00ffffffULL;
777 r |= (cc << 28) | ( (env->psw.mask >> 40) & 0xf );
778 env->regs[r1] = r;
779 HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__,
780 cc, env->psw.mask, r);
783 /* load access registers r1 to r3 from memory at a2 */
784 void HELPER(lam)(uint32_t r1, uint64_t a2, uint32_t r3)
786 int i;
788 for (i = r1;; i = (i + 1) % 16) {
789 env->aregs[i] = ldl(a2);
790 a2 += 4;
792 if (i == r3) {
793 break;
798 /* store access registers r1 to r3 in memory at a2 */
799 void HELPER(stam)(uint32_t r1, uint64_t a2, uint32_t r3)
801 int i;
803 for (i = r1;; i = (i + 1) % 16) {
804 stl(a2, env->aregs[i]);
805 a2 += 4;
807 if (i == r3) {
808 break;
813 /* move long */
814 uint32_t HELPER(mvcl)(uint32_t r1, uint32_t r2)
816 uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
817 uint64_t dest = get_address_31fix(r1);
818 uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
819 uint64_t src = get_address_31fix(r2);
820 uint8_t pad = src >> 24;
821 uint8_t v;
822 uint32_t cc;
824 if (destlen == srclen) {
825 cc = 0;
826 } else if (destlen < srclen) {
827 cc = 1;
828 } else {
829 cc = 2;
832 if (srclen > destlen) {
833 srclen = destlen;
836 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
837 v = ldub(src);
838 stb(dest, v);
841 for (; destlen; dest++, destlen--) {
842 stb(dest, pad);
845 env->regs[r1 + 1] = destlen;
846 /* can't use srclen here, we trunc'ed it */
847 env->regs[r2 + 1] -= src - env->regs[r2];
848 env->regs[r1] = dest;
849 env->regs[r2] = src;
851 return cc;
854 /* move long extended: another memcopy insn, with more bells and whistles */
855 uint32_t HELPER(mvcle)(uint32_t r1, uint64_t a2, uint32_t r3)
857 uint64_t destlen = env->regs[r1 + 1];
858 uint64_t dest = env->regs[r1];
859 uint64_t srclen = env->regs[r3 + 1];
860 uint64_t src = env->regs[r3];
861 uint8_t pad = a2 & 0xff;
862 uint8_t v;
863 uint32_t cc;
865 if (!(env->psw.mask & PSW_MASK_64)) {
866 destlen = (uint32_t)destlen;
867 srclen = (uint32_t)srclen;
868 dest &= 0x7fffffff;
869 src &= 0x7fffffff;
872 if (destlen == srclen) {
873 cc = 0;
874 } else if (destlen < srclen) {
875 cc = 1;
876 } else {
877 cc = 2;
880 if (srclen > destlen) {
881 srclen = destlen;
884 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
885 v = ldub(src);
886 stb(dest, v);
889 for (; destlen; dest++, destlen--) {
890 stb(dest, pad);
893 env->regs[r1 + 1] = destlen;
894 /* can't use srclen here, we trunc'ed it */
895 /* FIXME: 31-bit mode! */
896 env->regs[r3 + 1] -= src - env->regs[r3];
897 env->regs[r1] = dest;
898 env->regs[r3] = src;
900 return cc;
903 /* compare logical long extended: memcompare insn with padding */
904 uint32_t HELPER(clcle)(uint32_t r1, uint64_t a2, uint32_t r3)
906 uint64_t destlen = env->regs[r1 + 1];
907 uint64_t dest = get_address_31fix(r1);
908 uint64_t srclen = env->regs[r3 + 1];
909 uint64_t src = get_address_31fix(r3);
910 uint8_t pad = a2 & 0xff;
911 uint8_t v1 = 0,v2 = 0;
912 uint32_t cc = 0;
914 if (!(destlen || srclen)) {
915 return cc;
918 if (srclen > destlen) {
919 srclen = destlen;
922 for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
923 v1 = srclen ? ldub(src) : pad;
924 v2 = destlen ? ldub(dest) : pad;
925 if (v1 != v2) {
926 cc = (v1 < v2) ? 1 : 2;
927 break;
931 env->regs[r1 + 1] = destlen;
932 /* can't use srclen here, we trunc'ed it */
933 env->regs[r3 + 1] -= src - env->regs[r3];
934 env->regs[r1] = dest;
935 env->regs[r3] = src;
937 return cc;
940 /* subtract unsigned v2 from v1 with borrow */
941 uint32_t HELPER(slb)(uint32_t cc, uint32_t r1, uint32_t v2)
943 uint32_t v1 = env->regs[r1];
944 uint32_t res = v1 + (~v2) + (cc >> 1);
946 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | res;
947 if (cc & 2) {
948 /* borrow */
949 return v1 ? 1 : 0;
950 } else {
951 return v1 ? 3 : 2;
955 /* subtract unsigned v2 from v1 with borrow */
956 uint32_t HELPER(slbg)(uint32_t cc, uint32_t r1, uint64_t v1, uint64_t v2)
958 uint64_t res = v1 + (~v2) + (cc >> 1);
960 env->regs[r1] = res;
961 if (cc & 2) {
962 /* borrow */
963 return v1 ? 1 : 0;
964 } else {
965 return v1 ? 3 : 2;
969 static inline int float_comp_to_cc(int float_compare)
971 switch (float_compare) {
972 case float_relation_equal:
973 return 0;
974 case float_relation_less:
975 return 1;
976 case float_relation_greater:
977 return 2;
978 case float_relation_unordered:
979 return 3;
980 default:
981 cpu_abort(env, "unknown return value for float compare\n");
985 /* condition codes for binary FP ops */
986 static uint32_t set_cc_f32(float32 v1, float32 v2)
988 return float_comp_to_cc(float32_compare_quiet(v1, v2, &env->fpu_status));
991 static uint32_t set_cc_f64(float64 v1, float64 v2)
993 return float_comp_to_cc(float64_compare_quiet(v1, v2, &env->fpu_status));
996 /* condition codes for unary FP ops */
997 static uint32_t set_cc_nz_f32(float32 v)
999 if (float32_is_any_nan(v)) {
1000 return 3;
1001 } else if (float32_is_zero(v)) {
1002 return 0;
1003 } else if (float32_is_neg(v)) {
1004 return 1;
1005 } else {
1006 return 2;
1010 static uint32_t set_cc_nz_f64(float64 v)
1012 if (float64_is_any_nan(v)) {
1013 return 3;
1014 } else if (float64_is_zero(v)) {
1015 return 0;
1016 } else if (float64_is_neg(v)) {
1017 return 1;
1018 } else {
1019 return 2;
1023 static uint32_t set_cc_nz_f128(float128 v)
1025 if (float128_is_any_nan(v)) {
1026 return 3;
1027 } else if (float128_is_zero(v)) {
1028 return 0;
1029 } else if (float128_is_neg(v)) {
1030 return 1;
1031 } else {
1032 return 2;
1036 /* convert 32-bit int to 64-bit float */
1037 void HELPER(cdfbr)(uint32_t f1, int32_t v2)
1039 HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__, v2, f1);
1040 env->fregs[f1].d = int32_to_float64(v2, &env->fpu_status);
1043 /* convert 32-bit int to 128-bit float */
1044 void HELPER(cxfbr)(uint32_t f1, int32_t v2)
1046 CPU_QuadU v1;
1047 v1.q = int32_to_float128(v2, &env->fpu_status);
1048 env->fregs[f1].ll = v1.ll.upper;
1049 env->fregs[f1 + 2].ll = v1.ll.lower;
1052 /* convert 64-bit int to 32-bit float */
1053 void HELPER(cegbr)(uint32_t f1, int64_t v2)
1055 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
1056 env->fregs[f1].l.upper = int64_to_float32(v2, &env->fpu_status);
1059 /* convert 64-bit int to 64-bit float */
1060 void HELPER(cdgbr)(uint32_t f1, int64_t v2)
1062 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
1063 env->fregs[f1].d = int64_to_float64(v2, &env->fpu_status);
1066 /* convert 64-bit int to 128-bit float */
1067 void HELPER(cxgbr)(uint32_t f1, int64_t v2)
1069 CPU_QuadU x1;
1070 x1.q = int64_to_float128(v2, &env->fpu_status);
1071 HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__, v2,
1072 x1.ll.upper, x1.ll.lower);
1073 env->fregs[f1].ll = x1.ll.upper;
1074 env->fregs[f1 + 2].ll = x1.ll.lower;
1077 /* convert 32-bit int to 32-bit float */
1078 void HELPER(cefbr)(uint32_t f1, int32_t v2)
1080 env->fregs[f1].l.upper = int32_to_float32(v2, &env->fpu_status);
1081 HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__, v2,
1082 env->fregs[f1].l.upper, f1);
1085 /* 32-bit FP addition RR */
1086 uint32_t HELPER(aebr)(uint32_t f1, uint32_t f2)
1088 env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
1089 env->fregs[f2].l.upper,
1090 &env->fpu_status);
1091 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
1092 env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
1094 return set_cc_nz_f32(env->fregs[f1].l.upper);
1097 /* 64-bit FP addition RR */
1098 uint32_t HELPER(adbr)(uint32_t f1, uint32_t f2)
1100 env->fregs[f1].d = float64_add(env->fregs[f1].d, env->fregs[f2].d,
1101 &env->fpu_status);
1102 HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__,
1103 env->fregs[f2].d, env->fregs[f1].d, f1);
1105 return set_cc_nz_f64(env->fregs[f1].d);
1108 /* 32-bit FP subtraction RR */
1109 uint32_t HELPER(sebr)(uint32_t f1, uint32_t f2)
1111 env->fregs[f1].l.upper = float32_sub(env->fregs[f1].l.upper,
1112 env->fregs[f2].l.upper,
1113 &env->fpu_status);
1114 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
1115 env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
1117 return set_cc_nz_f32(env->fregs[f1].l.upper);
1120 /* 64-bit FP subtraction RR */
1121 uint32_t HELPER(sdbr)(uint32_t f1, uint32_t f2)
1123 env->fregs[f1].d = float64_sub(env->fregs[f1].d, env->fregs[f2].d,
1124 &env->fpu_status);
1125 HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
1126 __FUNCTION__, env->fregs[f2].d, env->fregs[f1].d, f1);
1128 return set_cc_nz_f64(env->fregs[f1].d);
1131 /* 32-bit FP division RR */
1132 void HELPER(debr)(uint32_t f1, uint32_t f2)
1134 env->fregs[f1].l.upper = float32_div(env->fregs[f1].l.upper,
1135 env->fregs[f2].l.upper,
1136 &env->fpu_status);
1139 /* 128-bit FP division RR */
1140 void HELPER(dxbr)(uint32_t f1, uint32_t f2)
1142 CPU_QuadU v1;
1143 v1.ll.upper = env->fregs[f1].ll;
1144 v1.ll.lower = env->fregs[f1 + 2].ll;
1145 CPU_QuadU v2;
1146 v2.ll.upper = env->fregs[f2].ll;
1147 v2.ll.lower = env->fregs[f2 + 2].ll;
1148 CPU_QuadU res;
1149 res.q = float128_div(v1.q, v2.q, &env->fpu_status);
1150 env->fregs[f1].ll = res.ll.upper;
1151 env->fregs[f1 + 2].ll = res.ll.lower;
1154 /* 64-bit FP multiplication RR */
1155 void HELPER(mdbr)(uint32_t f1, uint32_t f2)
1157 env->fregs[f1].d = float64_mul(env->fregs[f1].d, env->fregs[f2].d,
1158 &env->fpu_status);
1161 /* 128-bit FP multiplication RR */
1162 void HELPER(mxbr)(uint32_t f1, uint32_t f2)
1164 CPU_QuadU v1;
1165 v1.ll.upper = env->fregs[f1].ll;
1166 v1.ll.lower = env->fregs[f1 + 2].ll;
1167 CPU_QuadU v2;
1168 v2.ll.upper = env->fregs[f2].ll;
1169 v2.ll.lower = env->fregs[f2 + 2].ll;
1170 CPU_QuadU res;
1171 res.q = float128_mul(v1.q, v2.q, &env->fpu_status);
1172 env->fregs[f1].ll = res.ll.upper;
1173 env->fregs[f1 + 2].ll = res.ll.lower;
1176 /* convert 32-bit float to 64-bit float */
1177 void HELPER(ldebr)(uint32_t r1, uint32_t r2)
1179 env->fregs[r1].d = float32_to_float64(env->fregs[r2].l.upper,
1180 &env->fpu_status);
1183 /* convert 128-bit float to 64-bit float */
1184 void HELPER(ldxbr)(uint32_t f1, uint32_t f2)
1186 CPU_QuadU x2;
1187 x2.ll.upper = env->fregs[f2].ll;
1188 x2.ll.lower = env->fregs[f2 + 2].ll;
1189 env->fregs[f1].d = float128_to_float64(x2.q, &env->fpu_status);
1190 HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__, env->fregs[f1].d);
1193 /* convert 64-bit float to 128-bit float */
1194 void HELPER(lxdbr)(uint32_t f1, uint32_t f2)
1196 CPU_QuadU res;
1197 res.q = float64_to_float128(env->fregs[f2].d, &env->fpu_status);
1198 env->fregs[f1].ll = res.ll.upper;
1199 env->fregs[f1 + 2].ll = res.ll.lower;
1202 /* convert 64-bit float to 32-bit float */
1203 void HELPER(ledbr)(uint32_t f1, uint32_t f2)
1205 float64 d2 = env->fregs[f2].d;
1206 env->fregs[f1].l.upper = float64_to_float32(d2, &env->fpu_status);
1209 /* convert 128-bit float to 32-bit float */
1210 void HELPER(lexbr)(uint32_t f1, uint32_t f2)
1212 CPU_QuadU x2;
1213 x2.ll.upper = env->fregs[f2].ll;
1214 x2.ll.lower = env->fregs[f2 + 2].ll;
1215 env->fregs[f1].l.upper = float128_to_float32(x2.q, &env->fpu_status);
1216 HELPER_LOG("%s: to 0x%d\n", __FUNCTION__, env->fregs[f1].l.upper);
1219 /* absolute value of 32-bit float */
1220 uint32_t HELPER(lpebr)(uint32_t f1, uint32_t f2)
1222 float32 v1;
1223 float32 v2 = env->fregs[f2].l.upper;
1224 v1 = float32_abs(v2);
1225 env->fregs[f1].l.upper = v1;
1226 return set_cc_nz_f32(v1);
1229 /* absolute value of 64-bit float */
1230 uint32_t HELPER(lpdbr)(uint32_t f1, uint32_t f2)
1232 float64 v1;
1233 float64 v2 = env->fregs[f2].d;
1234 v1 = float64_abs(v2);
1235 env->fregs[f1].d = v1;
1236 return set_cc_nz_f64(v1);
1239 /* absolute value of 128-bit float */
1240 uint32_t HELPER(lpxbr)(uint32_t f1, uint32_t f2)
1242 CPU_QuadU v1;
1243 CPU_QuadU v2;
1244 v2.ll.upper = env->fregs[f2].ll;
1245 v2.ll.lower = env->fregs[f2 + 2].ll;
1246 v1.q = float128_abs(v2.q);
1247 env->fregs[f1].ll = v1.ll.upper;
1248 env->fregs[f1 + 2].ll = v1.ll.lower;
1249 return set_cc_nz_f128(v1.q);
1252 /* load and test 64-bit float */
1253 uint32_t HELPER(ltdbr)(uint32_t f1, uint32_t f2)
1255 env->fregs[f1].d = env->fregs[f2].d;
1256 return set_cc_nz_f64(env->fregs[f1].d);
1259 /* load and test 32-bit float */
1260 uint32_t HELPER(ltebr)(uint32_t f1, uint32_t f2)
1262 env->fregs[f1].l.upper = env->fregs[f2].l.upper;
1263 return set_cc_nz_f32(env->fregs[f1].l.upper);
1266 /* load and test 128-bit float */
1267 uint32_t HELPER(ltxbr)(uint32_t f1, uint32_t f2)
1269 CPU_QuadU x;
1270 x.ll.upper = env->fregs[f2].ll;
1271 x.ll.lower = env->fregs[f2 + 2].ll;
1272 env->fregs[f1].ll = x.ll.upper;
1273 env->fregs[f1 + 2].ll = x.ll.lower;
1274 return set_cc_nz_f128(x.q);
1277 /* load complement of 32-bit float */
1278 uint32_t HELPER(lcebr)(uint32_t f1, uint32_t f2)
1280 env->fregs[f1].l.upper = float32_chs(env->fregs[f2].l.upper);
1282 return set_cc_nz_f32(env->fregs[f1].l.upper);
1285 /* load complement of 64-bit float */
1286 uint32_t HELPER(lcdbr)(uint32_t f1, uint32_t f2)
1288 env->fregs[f1].d = float64_chs(env->fregs[f2].d);
1290 return set_cc_nz_f64(env->fregs[f1].d);
1293 /* load complement of 128-bit float */
1294 uint32_t HELPER(lcxbr)(uint32_t f1, uint32_t f2)
1296 CPU_QuadU x1, x2;
1297 x2.ll.upper = env->fregs[f2].ll;
1298 x2.ll.lower = env->fregs[f2 + 2].ll;
1299 x1.q = float128_chs(x2.q);
1300 env->fregs[f1].ll = x1.ll.upper;
1301 env->fregs[f1 + 2].ll = x1.ll.lower;
1302 return set_cc_nz_f128(x1.q);
1305 /* 32-bit FP addition RM */
1306 void HELPER(aeb)(uint32_t f1, uint32_t val)
1308 float32 v1 = env->fregs[f1].l.upper;
1309 CPU_FloatU v2;
1310 v2.l = val;
1311 HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__,
1312 v1, f1, v2.f);
1313 env->fregs[f1].l.upper = float32_add(v1, v2.f, &env->fpu_status);
1316 /* 32-bit FP division RM */
1317 void HELPER(deb)(uint32_t f1, uint32_t val)
1319 float32 v1 = env->fregs[f1].l.upper;
1320 CPU_FloatU v2;
1321 v2.l = val;
1322 HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__,
1323 v1, f1, v2.f);
1324 env->fregs[f1].l.upper = float32_div(v1, v2.f, &env->fpu_status);
1327 /* 32-bit FP multiplication RM */
1328 void HELPER(meeb)(uint32_t f1, uint32_t val)
1330 float32 v1 = env->fregs[f1].l.upper;
1331 CPU_FloatU v2;
1332 v2.l = val;
1333 HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__,
1334 v1, f1, v2.f);
1335 env->fregs[f1].l.upper = float32_mul(v1, v2.f, &env->fpu_status);
1338 /* 32-bit FP compare RR */
1339 uint32_t HELPER(cebr)(uint32_t f1, uint32_t f2)
1341 float32 v1 = env->fregs[f1].l.upper;
1342 float32 v2 = env->fregs[f2].l.upper;
1343 HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__,
1344 v1, f1, v2);
1345 return set_cc_f32(v1, v2);
1348 /* 64-bit FP compare RR */
1349 uint32_t HELPER(cdbr)(uint32_t f1, uint32_t f2)
1351 float64 v1 = env->fregs[f1].d;
1352 float64 v2 = env->fregs[f2].d;
1353 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__,
1354 v1, f1, v2);
1355 return set_cc_f64(v1, v2);
1358 /* 128-bit FP compare RR */
1359 uint32_t HELPER(cxbr)(uint32_t f1, uint32_t f2)
1361 CPU_QuadU v1;
1362 v1.ll.upper = env->fregs[f1].ll;
1363 v1.ll.lower = env->fregs[f1 + 2].ll;
1364 CPU_QuadU v2;
1365 v2.ll.upper = env->fregs[f2].ll;
1366 v2.ll.lower = env->fregs[f2 + 2].ll;
1368 return float_comp_to_cc(float128_compare_quiet(v1.q, v2.q,
1369 &env->fpu_status));
1372 /* 64-bit FP compare RM */
1373 uint32_t HELPER(cdb)(uint32_t f1, uint64_t a2)
1375 float64 v1 = env->fregs[f1].d;
1376 CPU_DoubleU v2;
1377 v2.ll = ldq(a2);
1378 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__, v1,
1379 f1, v2.d);
1380 return set_cc_f64(v1, v2.d);
1383 /* 64-bit FP addition RM */
1384 uint32_t HELPER(adb)(uint32_t f1, uint64_t a2)
1386 float64 v1 = env->fregs[f1].d;
1387 CPU_DoubleU v2;
1388 v2.ll = ldq(a2);
1389 HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__,
1390 v1, f1, v2.d);
1391 env->fregs[f1].d = v1 = float64_add(v1, v2.d, &env->fpu_status);
1392 return set_cc_nz_f64(v1);
1395 /* 32-bit FP subtraction RM */
1396 void HELPER(seb)(uint32_t f1, uint32_t val)
1398 float32 v1 = env->fregs[f1].l.upper;
1399 CPU_FloatU v2;
1400 v2.l = val;
1401 env->fregs[f1].l.upper = float32_sub(v1, v2.f, &env->fpu_status);
1404 /* 64-bit FP subtraction RM */
1405 uint32_t HELPER(sdb)(uint32_t f1, uint64_t a2)
1407 float64 v1 = env->fregs[f1].d;
1408 CPU_DoubleU v2;
1409 v2.ll = ldq(a2);
1410 env->fregs[f1].d = v1 = float64_sub(v1, v2.d, &env->fpu_status);
1411 return set_cc_nz_f64(v1);
1414 /* 64-bit FP multiplication RM */
1415 void HELPER(mdb)(uint32_t f1, uint64_t a2)
1417 float64 v1 = env->fregs[f1].d;
1418 CPU_DoubleU v2;
1419 v2.ll = ldq(a2);
1420 HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__,
1421 v1, f1, v2.d);
1422 env->fregs[f1].d = float64_mul(v1, v2.d, &env->fpu_status);
1425 /* 64-bit FP division RM */
1426 void HELPER(ddb)(uint32_t f1, uint64_t a2)
1428 float64 v1 = env->fregs[f1].d;
1429 CPU_DoubleU v2;
1430 v2.ll = ldq(a2);
1431 HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__,
1432 v1, f1, v2.d);
1433 env->fregs[f1].d = float64_div(v1, v2.d, &env->fpu_status);
1436 static void set_round_mode(int m3)
1438 switch (m3) {
1439 case 0:
1440 /* current mode */
1441 break;
1442 case 1:
1443 /* biased round to nearest */
1444 case 4:
1445 /* round to nearest */
1446 set_float_rounding_mode(float_round_nearest_even, &env->fpu_status);
1447 break;
1448 case 5:
1449 /* round to zero */
1450 set_float_rounding_mode(float_round_to_zero, &env->fpu_status);
1451 break;
1452 case 6:
1453 /* round to +inf */
1454 set_float_rounding_mode(float_round_up, &env->fpu_status);
1455 break;
1456 case 7:
1457 /* round to -inf */
1458 set_float_rounding_mode(float_round_down, &env->fpu_status);
1459 break;
1463 /* convert 32-bit float to 64-bit int */
1464 uint32_t HELPER(cgebr)(uint32_t r1, uint32_t f2, uint32_t m3)
1466 float32 v2 = env->fregs[f2].l.upper;
1467 set_round_mode(m3);
1468 env->regs[r1] = float32_to_int64(v2, &env->fpu_status);
1469 return set_cc_nz_f32(v2);
1472 /* convert 64-bit float to 64-bit int */
1473 uint32_t HELPER(cgdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1475 float64 v2 = env->fregs[f2].d;
1476 set_round_mode(m3);
1477 env->regs[r1] = float64_to_int64(v2, &env->fpu_status);
1478 return set_cc_nz_f64(v2);
1481 /* convert 128-bit float to 64-bit int */
1482 uint32_t HELPER(cgxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1484 CPU_QuadU v2;
1485 v2.ll.upper = env->fregs[f2].ll;
1486 v2.ll.lower = env->fregs[f2 + 2].ll;
1487 set_round_mode(m3);
1488 env->regs[r1] = float128_to_int64(v2.q, &env->fpu_status);
1489 if (float128_is_any_nan(v2.q)) {
1490 return 3;
1491 } else if (float128_is_zero(v2.q)) {
1492 return 0;
1493 } else if (float128_is_neg(v2.q)) {
1494 return 1;
1495 } else {
1496 return 2;
1500 /* convert 32-bit float to 32-bit int */
1501 uint32_t HELPER(cfebr)(uint32_t r1, uint32_t f2, uint32_t m3)
1503 float32 v2 = env->fregs[f2].l.upper;
1504 set_round_mode(m3);
1505 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1506 float32_to_int32(v2, &env->fpu_status);
1507 return set_cc_nz_f32(v2);
1510 /* convert 64-bit float to 32-bit int */
1511 uint32_t HELPER(cfdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1513 float64 v2 = env->fregs[f2].d;
1514 set_round_mode(m3);
1515 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1516 float64_to_int32(v2, &env->fpu_status);
1517 return set_cc_nz_f64(v2);
1520 /* convert 128-bit float to 32-bit int */
1521 uint32_t HELPER(cfxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1523 CPU_QuadU v2;
1524 v2.ll.upper = env->fregs[f2].ll;
1525 v2.ll.lower = env->fregs[f2 + 2].ll;
1526 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1527 float128_to_int32(v2.q, &env->fpu_status);
1528 return set_cc_nz_f128(v2.q);
1531 /* load 32-bit FP zero */
1532 void HELPER(lzer)(uint32_t f1)
1534 env->fregs[f1].l.upper = float32_zero;
1537 /* load 64-bit FP zero */
1538 void HELPER(lzdr)(uint32_t f1)
1540 env->fregs[f1].d = float64_zero;
1543 /* load 128-bit FP zero */
1544 void HELPER(lzxr)(uint32_t f1)
1546 CPU_QuadU x;
1547 x.q = float64_to_float128(float64_zero, &env->fpu_status);
1548 env->fregs[f1].ll = x.ll.upper;
1549 env->fregs[f1 + 2].ll = x.ll.lower;
1552 /* 128-bit FP subtraction RR */
1553 uint32_t HELPER(sxbr)(uint32_t f1, uint32_t f2)
1555 CPU_QuadU v1;
1556 v1.ll.upper = env->fregs[f1].ll;
1557 v1.ll.lower = env->fregs[f1 + 2].ll;
1558 CPU_QuadU v2;
1559 v2.ll.upper = env->fregs[f2].ll;
1560 v2.ll.lower = env->fregs[f2 + 2].ll;
1561 CPU_QuadU res;
1562 res.q = float128_sub(v1.q, v2.q, &env->fpu_status);
1563 env->fregs[f1].ll = res.ll.upper;
1564 env->fregs[f1 + 2].ll = res.ll.lower;
1565 return set_cc_nz_f128(res.q);
1568 /* 128-bit FP addition RR */
1569 uint32_t HELPER(axbr)(uint32_t f1, uint32_t f2)
1571 CPU_QuadU v1;
1572 v1.ll.upper = env->fregs[f1].ll;
1573 v1.ll.lower = env->fregs[f1 + 2].ll;
1574 CPU_QuadU v2;
1575 v2.ll.upper = env->fregs[f2].ll;
1576 v2.ll.lower = env->fregs[f2 + 2].ll;
1577 CPU_QuadU res;
1578 res.q = float128_add(v1.q, v2.q, &env->fpu_status);
1579 env->fregs[f1].ll = res.ll.upper;
1580 env->fregs[f1 + 2].ll = res.ll.lower;
1581 return set_cc_nz_f128(res.q);
1584 /* 32-bit FP multiplication RR */
1585 void HELPER(meebr)(uint32_t f1, uint32_t f2)
1587 env->fregs[f1].l.upper = float32_mul(env->fregs[f1].l.upper,
1588 env->fregs[f2].l.upper,
1589 &env->fpu_status);
1592 /* 64-bit FP division RR */
1593 void HELPER(ddbr)(uint32_t f1, uint32_t f2)
1595 env->fregs[f1].d = float64_div(env->fregs[f1].d, env->fregs[f2].d,
1596 &env->fpu_status);
1599 /* 64-bit FP multiply and add RM */
1600 void HELPER(madb)(uint32_t f1, uint64_t a2, uint32_t f3)
1602 HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__, f1, a2, f3);
1603 CPU_DoubleU v2;
1604 v2.ll = ldq(a2);
1605 env->fregs[f1].d = float64_add(env->fregs[f1].d,
1606 float64_mul(v2.d, env->fregs[f3].d,
1607 &env->fpu_status),
1608 &env->fpu_status);
1611 /* 64-bit FP multiply and add RR */
1612 void HELPER(madbr)(uint32_t f1, uint32_t f3, uint32_t f2)
1614 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
1615 env->fregs[f1].d = float64_add(float64_mul(env->fregs[f2].d,
1616 env->fregs[f3].d,
1617 &env->fpu_status),
1618 env->fregs[f1].d, &env->fpu_status);
1621 /* 64-bit FP multiply and subtract RR */
1622 void HELPER(msdbr)(uint32_t f1, uint32_t f3, uint32_t f2)
1624 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
1625 env->fregs[f1].d = float64_sub(float64_mul(env->fregs[f2].d,
1626 env->fregs[f3].d,
1627 &env->fpu_status),
1628 env->fregs[f1].d, &env->fpu_status);
1631 /* 32-bit FP multiply and add RR */
1632 void HELPER(maebr)(uint32_t f1, uint32_t f3, uint32_t f2)
1634 env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
1635 float32_mul(env->fregs[f2].l.upper,
1636 env->fregs[f3].l.upper,
1637 &env->fpu_status),
1638 &env->fpu_status);
1641 /* convert 32-bit float to 64-bit float */
1642 void HELPER(ldeb)(uint32_t f1, uint64_t a2)
1644 uint32_t v2;
1645 v2 = ldl(a2);
1646 env->fregs[f1].d = float32_to_float64(v2,
1647 &env->fpu_status);
1650 /* convert 64-bit float to 128-bit float */
1651 void HELPER(lxdb)(uint32_t f1, uint64_t a2)
1653 CPU_DoubleU v2;
1654 v2.ll = ldq(a2);
1655 CPU_QuadU v1;
1656 v1.q = float64_to_float128(v2.d, &env->fpu_status);
1657 env->fregs[f1].ll = v1.ll.upper;
1658 env->fregs[f1 + 2].ll = v1.ll.lower;
1661 /* test data class 32-bit */
1662 uint32_t HELPER(tceb)(uint32_t f1, uint64_t m2)
1664 float32 v1 = env->fregs[f1].l.upper;
1665 int neg = float32_is_neg(v1);
1666 uint32_t cc = 0;
1668 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, (long)v1, m2, neg);
1669 if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
1670 (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
1671 (float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
1672 (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
1673 cc = 1;
1674 } else if (m2 & (1 << (9-neg))) {
1675 /* assume normalized number */
1676 cc = 1;
1679 /* FIXME: denormalized? */
1680 return cc;
1683 /* test data class 64-bit */
1684 uint32_t HELPER(tcdb)(uint32_t f1, uint64_t m2)
1686 float64 v1 = env->fregs[f1].d;
1687 int neg = float64_is_neg(v1);
1688 uint32_t cc = 0;
1690 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, v1, m2, neg);
1691 if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
1692 (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
1693 (float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
1694 (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
1695 cc = 1;
1696 } else if (m2 & (1 << (9-neg))) {
1697 /* assume normalized number */
1698 cc = 1;
1700 /* FIXME: denormalized? */
1701 return cc;
1704 /* test data class 128-bit */
1705 uint32_t HELPER(tcxb)(uint32_t f1, uint64_t m2)
1707 CPU_QuadU v1;
1708 uint32_t cc = 0;
1709 v1.ll.upper = env->fregs[f1].ll;
1710 v1.ll.lower = env->fregs[f1 + 2].ll;
1712 int neg = float128_is_neg(v1.q);
1713 if ((float128_is_zero(v1.q) && (m2 & (1 << (11-neg)))) ||
1714 (float128_is_infinity(v1.q) && (m2 & (1 << (5-neg)))) ||
1715 (float128_is_any_nan(v1.q) && (m2 & (1 << (3-neg)))) ||
1716 (float128_is_signaling_nan(v1.q) && (m2 & (1 << (1-neg))))) {
1717 cc = 1;
1718 } else if (m2 & (1 << (9-neg))) {
1719 /* assume normalized number */
1720 cc = 1;
1722 /* FIXME: denormalized? */
1723 return cc;
1726 /* find leftmost one */
1727 uint32_t HELPER(flogr)(uint32_t r1, uint64_t v2)
1729 uint64_t res = 0;
1730 uint64_t ov2 = v2;
1732 while (!(v2 & 0x8000000000000000ULL) && v2) {
1733 v2 <<= 1;
1734 res++;
1737 if (!v2) {
1738 env->regs[r1] = 64;
1739 env->regs[r1 + 1] = 0;
1740 return 0;
1741 } else {
1742 env->regs[r1] = res;
1743 env->regs[r1 + 1] = ov2 & ~(0x8000000000000000ULL >> res);
1744 return 2;
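/* Worked example (illustrative): v2 = 0x0000800000000000 needs 16 shifts,
   so r1 is set to 16, r1 + 1 to the input with that leftmost one cleared
   (here 0), and cc 2 is returned; a zero input gives r1 = 64, r1 + 1 = 0
   and cc 0. */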
1748 /* square root 64-bit RR */
1749 void HELPER(sqdbr)(uint32_t f1, uint32_t f2)
1751 env->fregs[f1].d = float64_sqrt(env->fregs[f2].d, &env->fpu_status);
1754 /* checksum */
1755 void HELPER(cksm)(uint32_t r1, uint32_t r2)
1757 uint64_t src = get_address_31fix(r2);
1758 uint64_t src_len = env->regs[(r2 + 1) & 15];
1759 uint64_t cksm = (uint32_t)env->regs[r1];
1761 while (src_len >= 4) {
1762 cksm += ldl(src);
1764 /* move to next word */
1765 src_len -= 4;
1766 src += 4;
1769 switch (src_len) {
1770 case 0:
1771 break;
1772 case 1:
1773 cksm += ldub(src) << 24;
1774 break;
1775 case 2:
1776 cksm += lduw(src) << 16;
1777 break;
1778 case 3:
1779 cksm += lduw(src) << 16;
1780 cksm += ldub(src + 2) << 8;
1781 break;
1784 /* indicate we've processed everything */
1785 env->regs[r2] = src + src_len;
1786 env->regs[(r2 + 1) & 15] = 0;
1788 /* store result */
1789 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1790 ((uint32_t)cksm + (cksm >> 32));
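/* Illustrative example: a running sum of 0x100000003 is folded to
   0x00000004 before being stored in the low half of r1. */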
1793 static inline uint32_t cc_calc_ltgt_32(CPUState *env, int32_t src,
1794 int32_t dst)
1796 if (src == dst) {
1797 return 0;
1798 } else if (src < dst) {
1799 return 1;
1800 } else {
1801 return 2;
1805 static inline uint32_t cc_calc_ltgt0_32(CPUState *env, int32_t dst)
1807 return cc_calc_ltgt_32(env, dst, 0);
1810 static inline uint32_t cc_calc_ltgt_64(CPUState *env, int64_t src,
1811 int64_t dst)
1813 if (src == dst) {
1814 return 0;
1815 } else if (src < dst) {
1816 return 1;
1817 } else {
1818 return 2;
1822 static inline uint32_t cc_calc_ltgt0_64(CPUState *env, int64_t dst)
1824 return cc_calc_ltgt_64(env, dst, 0);
1827 static inline uint32_t cc_calc_ltugtu_32(CPUState *env, uint32_t src,
1828 uint32_t dst)
1830 if (src == dst) {
1831 return 0;
1832 } else if (src < dst) {
1833 return 1;
1834 } else {
1835 return 2;
1839 static inline uint32_t cc_calc_ltugtu_64(CPUState *env, uint64_t src,
1840 uint64_t dst)
1842 if (src == dst) {
1843 return 0;
1844 } else if (src < dst) {
1845 return 1;
1846 } else {
1847 return 2;
1851 static inline uint32_t cc_calc_tm_32(CPUState *env, uint32_t val, uint32_t mask)
1853 HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__, val, mask);
1854 uint16_t r = val & mask;
1855 if (r == 0 || mask == 0) {
1856 return 0;
1857 } else if (r == mask) {
1858 return 3;
1859 } else {
1860 return 1;
1864 /* set condition code for test under mask */
1865 static inline uint32_t cc_calc_tm_64(CPUState *env, uint64_t val, uint32_t mask)
1867 uint16_t r = val & mask;
1868 HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__, val, mask, r);
1869 if (r == 0 || mask == 0) {
1870 return 0;
1871 } else if (r == mask) {
1872 return 3;
1873 } else {
1874 while (!(mask & 0x8000)) {
1875 mask <<= 1;
1876 val <<= 1;
1878 if (val & 0x8000) {
1879 return 2;
1880 } else {
1881 return 1;
1886 static inline uint32_t cc_calc_nz(CPUState *env, uint64_t dst)
1888 return !!dst;
1891 static inline uint32_t cc_calc_add_64(CPUState *env, int64_t a1, int64_t a2,
1892 int64_t ar)
1894 if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
1895 return 3; /* overflow */
1896 } else {
1897 if (ar < 0) {
1898 return 1;
1899 } else if (ar > 0) {
1900 return 2;
1901 } else {
1902 return 0;
1907 static inline uint32_t cc_calc_addu_64(CPUState *env, uint64_t a1, uint64_t a2,
1908 uint64_t ar)
1910 if (ar == 0) {
1911 if (a1) {
1912 return 2;
1913 } else {
1914 return 0;
1916 } else {
1917 if (ar < a1 || ar < a2) {
1918 return 3;
1919 } else {
1920 return 1;
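/* Illustrative cases: 0 + 0 -> cc 0; 1 + 1 -> cc 1 (non-zero, no carry);
   0xffffffffffffffff + 1 -> cc 2 (zero, carry);
   0xffffffffffffffff + 2 -> cc 3 (non-zero, carry). */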
1925 static inline uint32_t cc_calc_sub_64(CPUState *env, int64_t a1, int64_t a2,
1926 int64_t ar)
1928 if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
1929 return 3; /* overflow */
1930 } else {
1931 if (ar < 0) {
1932 return 1;
1933 } else if (ar > 0) {
1934 return 2;
1935 } else {
1936 return 0;
1941 static inline uint32_t cc_calc_subu_64(CPUState *env, uint64_t a1, uint64_t a2,
1942 uint64_t ar)
1944 if (ar == 0) {
1945 return 2;
1946 } else {
1947 if (a2 > a1) {
1948 return 1;
1949 } else {
1950 return 3;
1955 static inline uint32_t cc_calc_abs_64(CPUState *env, int64_t dst)
1957 if ((uint64_t)dst == 0x8000000000000000ULL) {
1958 return 3;
1959 } else if (dst) {
1960 return 1;
1961 } else {
1962 return 0;
1966 static inline uint32_t cc_calc_nabs_64(CPUState *env, int64_t dst)
1968 return !!dst;
1971 static inline uint32_t cc_calc_comp_64(CPUState *env, int64_t dst)
1973 if ((uint64_t)dst == 0x8000000000000000ULL) {
1974 return 3;
1975 } else if (dst < 0) {
1976 return 1;
1977 } else if (dst > 0) {
1978 return 2;
1979 } else {
1980 return 0;
1985 static inline uint32_t cc_calc_add_32(CPUState *env, int32_t a1, int32_t a2,
1986 int32_t ar)
1988 if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
1989 return 3; /* overflow */
1990 } else {
1991 if (ar < 0) {
1992 return 1;
1993 } else if (ar > 0) {
1994 return 2;
1995 } else {
1996 return 0;
2001 static inline uint32_t cc_calc_addu_32(CPUState *env, uint32_t a1, uint32_t a2,
2002 uint32_t ar)
2004 if (ar == 0) {
2005 if (a1) {
2006 return 2;
2007 } else {
2008 return 0;
2010 } else {
2011 if (ar < a1 || ar < a2) {
2012 return 3;
2013 } else {
2014 return 1;
2019 static inline uint32_t cc_calc_sub_32(CPUState *env, int32_t a1, int32_t a2,
2020 int32_t ar)
2022 if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
2023 return 3; /* overflow */
2024 } else {
2025 if (ar < 0) {
2026 return 1;
2027 } else if (ar > 0) {
2028 return 2;
2029 } else {
2030 return 0;
2035 static inline uint32_t cc_calc_subu_32(CPUState *env, uint32_t a1, uint32_t a2,
2036 uint32_t ar)
2038 if (ar == 0) {
2039 return 2;
2040 } else {
2041 if (a2 > a1) {
2042 return 1;
2043 } else {
2044 return 3;
2049 static inline uint32_t cc_calc_abs_32(CPUState *env, int32_t dst)
2051 if ((uint32_t)dst == 0x80000000UL) {
2052 return 3;
2053 } else if (dst) {
2054 return 1;
2055 } else {
2056 return 0;
2060 static inline uint32_t cc_calc_nabs_32(CPUState *env, int32_t dst)
2062 return !!dst;
2065 static inline uint32_t cc_calc_comp_32(CPUState *env, int32_t dst)
2067 if ((uint32_t)dst == 0x80000000UL) {
2068 return 3;
2069 } else if (dst < 0) {
2070 return 1;
2071 } else if (dst > 0) {
2072 return 2;
2073 } else {
2074 return 0;
2078 /* calculate condition code for insert character under mask insn */
2079 static inline uint32_t cc_calc_icm_32(CPUState *env, uint32_t mask, uint32_t val)
2081 HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__, mask, val);
2082 uint32_t cc;
2084 if (mask == 0xf) {
2085 if (!val) {
2086 return 0;
2087 } else if (val & 0x80000000) {
2088 return 1;
2089 } else {
2090 return 2;
2094 if (!val || !mask) {
2095 cc = 0;
2096 } else {
2097 while (mask != 1) {
2098 mask >>= 1;
2099 val >>= 8;
2101 if (val & 0x80) {
2102 cc = 1;
2103 } else {
2104 cc = 2;
2107 return cc;
2110 static inline uint32_t cc_calc_slag(CPUState *env, uint64_t src, uint64_t shift)
2112 uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
2113 uint64_t match, r;
2115 /* check if the sign bit stays the same */
2116 if (src & (1ULL << 63)) {
2117 match = mask;
2118 } else {
2119 match = 0;
2122 if ((src & mask) != match) {
2123 /* overflow */
2124 return 3;
2127 r = ((src << shift) & ((1ULL << 63) - 1)) | (src & (1ULL << 63));
2129 if ((int64_t)r == 0) {
2130 return 0;
2131 } else if ((int64_t)r < 0) {
2132 return 1;
2135 return 2;
2139 static inline uint32_t do_calc_cc(CPUState *env, uint32_t cc_op, uint64_t src,
2140 uint64_t dst, uint64_t vr)
2142 uint32_t r = 0;
2144 switch (cc_op) {
2145 case CC_OP_CONST0:
2146 case CC_OP_CONST1:
2147 case CC_OP_CONST2:
2148 case CC_OP_CONST3:
2149 /* cc_op value _is_ cc */
2150 r = cc_op;
2151 break;
2152 case CC_OP_LTGT0_32:
2153 r = cc_calc_ltgt0_32(env, dst);
2154 break;
2155 case CC_OP_LTGT0_64:
2156 r = cc_calc_ltgt0_64(env, dst);
2157 break;
2158 case CC_OP_LTGT_32:
2159 r = cc_calc_ltgt_32(env, src, dst);
2160 break;
2161 case CC_OP_LTGT_64:
2162 r = cc_calc_ltgt_64(env, src, dst);
2163 break;
2164 case CC_OP_LTUGTU_32:
2165 r = cc_calc_ltugtu_32(env, src, dst);
2166 break;
2167 case CC_OP_LTUGTU_64:
2168 r = cc_calc_ltugtu_64(env, src, dst);
2169 break;
2170 case CC_OP_TM_32:
2171 r = cc_calc_tm_32(env, src, dst);
2172 break;
2173 case CC_OP_TM_64:
2174 r = cc_calc_tm_64(env, src, dst);
2175 break;
2176 case CC_OP_NZ:
2177 r = cc_calc_nz(env, dst);
2178 break;
2179 case CC_OP_ADD_64:
2180 r = cc_calc_add_64(env, src, dst, vr);
2181 break;
2182 case CC_OP_ADDU_64:
2183 r = cc_calc_addu_64(env, src, dst, vr);
2184 break;
2185 case CC_OP_SUB_64:
2186 r = cc_calc_sub_64(env, src, dst, vr);
2187 break;
2188 case CC_OP_SUBU_64:
2189 r = cc_calc_subu_64(env, src, dst, vr);
2190 break;
2191 case CC_OP_ABS_64:
2192 r = cc_calc_abs_64(env, dst);
2193 break;
2194 case CC_OP_NABS_64:
2195 r = cc_calc_nabs_64(env, dst);
2196 break;
2197 case CC_OP_COMP_64:
2198 r = cc_calc_comp_64(env, dst);
2199 break;
2201 case CC_OP_ADD_32:
2202 r = cc_calc_add_32(env, src, dst, vr);
2203 break;
2204 case CC_OP_ADDU_32:
2205 r = cc_calc_addu_32(env, src, dst, vr);
2206 break;
2207 case CC_OP_SUB_32:
2208 r = cc_calc_sub_32(env, src, dst, vr);
2209 break;
2210 case CC_OP_SUBU_32:
2211 r = cc_calc_subu_32(env, src, dst, vr);
2212 break;
2213 case CC_OP_ABS_32:
2214 r = cc_calc_abs_64(env, dst);
2215 break;
2216 case CC_OP_NABS_32:
2217 r = cc_calc_nabs_64(env, dst);
2218 break;
2219 case CC_OP_COMP_32:
2220 r = cc_calc_comp_32(env, dst);
2221 break;
2223 case CC_OP_ICM:
2224 r = cc_calc_icm_32(env, src, dst);
2225 break;
2226 case CC_OP_SLAG:
2227 r = cc_calc_slag(env, src, dst);
2228 break;
2230 case CC_OP_LTGT_F32:
2231 r = set_cc_f32(src, dst);
2232 break;
2233 case CC_OP_LTGT_F64:
2234 r = set_cc_f64(src, dst);
2235 break;
2236 case CC_OP_NZ_F32:
2237 r = set_cc_nz_f32(dst);
2238 break;
2239 case CC_OP_NZ_F64:
2240 r = set_cc_nz_f64(dst);
2241 break;
2243 default:
2244 cpu_abort(env, "Unknown CC operation: %s\n", cc_name(cc_op));
2247 HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__,
2248 cc_name(cc_op), src, dst, vr, r);
2249 return r;
2252 uint32_t calc_cc(CPUState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
2253 uint64_t vr)
2255 return do_calc_cc(env, cc_op, src, dst, vr);
2258 uint32_t HELPER(calc_cc)(uint32_t cc_op, uint64_t src, uint64_t dst,
2259 uint64_t vr)
2261 return do_calc_cc(env, cc_op, src, dst, vr);
2264 uint64_t HELPER(cvd)(int32_t bin)
2266 /* positive 0 */
2267 uint64_t dec = 0x0c;
2268 int shift = 4;
2270 if (bin < 0) {
2271 bin = -bin;
2272 dec = 0x0d;
2275 for (shift = 4; (shift < 64) && bin; shift += 4) {
2276 int current_number = bin % 10;
2278 dec |= (current_number) << shift;
2279 bin /= 10;
2282 return dec;
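/* Worked example (illustrative): cvd(-123) packs one digit per nibble,
   giving 0x123d - sign nibble 0xd for negative, 0xc for positive. */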
2285 void HELPER(unpk)(uint32_t len, uint64_t dest, uint64_t src)
2287 int len_dest = len >> 4;
2288 int len_src = len & 0xf;
2289 uint8_t b;
2290 int second_nibble = 0;
2292 dest += len_dest;
2293 src += len_src;
2295 /* last byte is special, it only flips the nibbles */
2296 b = ldub(src);
2297 stb(dest, (b << 4) | (b >> 4));
2298 src--;
2299 len_src--;
2301 /* now expand one digit per byte, padding with 0xf0 zone bits */
2303 while (len_dest > 0) {
2304 uint8_t cur_byte = 0;
2306 if (len_src > 0) {
2307 cur_byte = ldub(src);
2310 len_dest--;
2311 dest--;
2313 /* only advance one nibble at a time */
2314 if (second_nibble) {
2315 cur_byte >>= 4;
2316 len_src--;
2317 src--;
2319 second_nibble = !second_nibble;
2321 /* digit */
2322 cur_byte = (cur_byte & 0xf);
2323 /* zone bits */
2324 cur_byte |= 0xf0;
2326 stb(dest, cur_byte);
2330 void HELPER(tr)(uint32_t len, uint64_t array, uint64_t trans)
2332 int i;
2334 for (i = 0; i <= len; i++) {
2335 uint8_t byte = ldub(array + i);
2336 uint8_t new_byte = ldub(trans + byte);
2337 stb(array + i, new_byte);
2341 #ifndef CONFIG_USER_ONLY
2343 void HELPER(load_psw)(uint64_t mask, uint64_t addr)
2345 load_psw(env, mask, addr);
2346 cpu_loop_exit(env);
2349 static void program_interrupt(CPUState *env, uint32_t code, int ilc)
2351 qemu_log("program interrupt at %#" PRIx64 "\n", env->psw.addr);
2353 if (kvm_enabled()) {
2354 #ifdef CONFIG_KVM
2355 kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code);
2356 #endif
2357 } else {
2358 env->int_pgm_code = code;
2359 env->int_pgm_ilc = ilc;
2360 env->exception_index = EXCP_PGM;
2361 cpu_loop_exit(env);
2365 static void ext_interrupt(CPUState *env, int type, uint32_t param,
2366 uint64_t param64)
2368 cpu_inject_ext(env, type, param, param64);
2371 int sclp_service_call(CPUState *env, uint32_t sccb, uint64_t code)
2373 int r = 0;
2374 int shift = 0;
2376 #ifdef DEBUG_HELPER
2377 printf("sclp(0x%x, 0x%" PRIx64 ")\n", sccb, code);
2378 #endif
2380 if (sccb & ~0x7ffffff8ul) {
2381 fprintf(stderr, "KVM: invalid sccb address 0x%x\n", sccb);
2382 r = -1;
2383 goto out;
2386 switch(code) {
2387 case SCLP_CMDW_READ_SCP_INFO:
2388 case SCLP_CMDW_READ_SCP_INFO_FORCED:
2389 while ((ram_size >> (20 + shift)) > 65535) {
2390 shift++;
2392 stw_phys(sccb + SCP_MEM_CODE, ram_size >> (20 + shift));
2393 stb_phys(sccb + SCP_INCREMENT, 1 << shift);
2394 stw_phys(sccb + SCP_RESPONSE_CODE, 0x10);
2396 if (kvm_enabled()) {
2397 #ifdef CONFIG_KVM
2398 kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE,
2399 sccb & ~3, 0, 1);
2400 #endif
2401 } else {
2402 env->psw.addr += 4;
2403 ext_interrupt(env, EXT_SERVICE, sccb & ~3, 0);
2405 break;
2406 default:
2407 #ifdef DEBUG_HELPER
2408 printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64 "\n", sccb, code);
2409 #endif
2410 r = -1;
2411 break;
2414 out:
2415 return r;
2418 /* SCLP service call */
2419 uint32_t HELPER(servc)(uint32_t r1, uint64_t r2)
2421 if (sclp_service_call(env, r1, r2)) {
2422 return 3;
2425 return 0;
2428 /* DIAG */
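/* DIAGNOSE functions handled here: 0x500 is the KVM virtio hypercall,
   0x44 a voluntary time-slice yield and 0x308 the re-IPL interface;
   anything else ends up raising an operation exception. */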
2429 uint64_t HELPER(diag)(uint32_t num, uint64_t mem, uint64_t code)
2431 uint64_t r;
2433 switch (num) {
2434 case 0x500:
2435 /* KVM hypercall */
2436 r = s390_virtio_hypercall(env, mem, code);
2437 break;
2438 case 0x44:
2439 /* yield */
2440 r = 0;
2441 break;
2442 case 0x308:
2443 /* ipl */
2444 r = 0;
2445 break;
2446 default:
2447 r = -1;
2448 break;
2451 if (r) {
2452 program_interrupt(env, PGM_OPERATION, ILC_LATER_INC);
2455 return r;
2458 /* Store CPU ID */
2459 void HELPER(stidp)(uint64_t a1)
2461 stq(a1, env->cpu_num);
2464 /* Set Prefix */
2465 void HELPER(spx)(uint64_t a1)
2467 uint32_t prefix;
2469 prefix = ldl(a1);
2470 env->psa = prefix & 0xfffff000;
2471 qemu_log("prefix: %#x\n", prefix);
2472 tlb_flush_page(env, 0);
2473 tlb_flush_page(env, TARGET_PAGE_SIZE);
2476 /* Set Clock */
2477 uint32_t HELPER(sck)(uint64_t a1)
2479 /* XXX not implemented - is it necessary? */
2481 return 0;
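/* Guest TOD value = configured tod_offset plus the vm_clock time elapsed
   since tod_basetime, converted to TOD-clock units (presumably 2^-12
   microseconds each) by time2tod(). */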
2484 static inline uint64_t clock_value(CPUState *env)
2486 uint64_t time;
2488 time = env->tod_offset +
2489 time2tod(qemu_get_clock_ns(vm_clock) - env->tod_basetime);
2491 return time;
2494 /* Store Clock */
2495 uint32_t HELPER(stck)(uint64_t a1)
2497 stq(a1, clock_value(env));
2499 return 0;
2502 /* Store Clock Extended */
2503 uint32_t HELPER(stcke)(uint64_t a1)
2505 stb(a1, 0);
2506 /* basically the same value as stck */
2507 stq(a1 + 1, clock_value(env) | env->cpu_num);
2508 /* low-order clock bits (bytes 9-13) are more fine grained than stck */
2509 stl(a1 + 9, 0);
2510 /* XXX programmable field (bytes 14-15); stay within the 16-byte operand */
2511 stl(a1 + 12, 0);
2514 return 0;
2517 /* Set Clock Comparator */
2518 void HELPER(sckc)(uint64_t a1)
2520 uint64_t time = ldq(a1);
2522 if (time == -1ULL) {
2523 return;
2526 /* difference between now and then */
2527 time -= clock_value(env);
2528 /* TOD-clock units are 2^-12 microseconds, so ns = units * 125 / 512 */
2529 time = (time * 125) >> 9;
2531 qemu_mod_timer(env->tod_timer, qemu_get_clock_ns(vm_clock) + time);
2534 /* Store Clock Comparator */
2535 void HELPER(stckc)(uint64_t a1)
2537 /* XXX implement */
2538 stq(a1, 0);
2541 /* Set CPU Timer */
2542 void HELPER(spt)(uint64_t a1)
2544 uint64_t time = ldq(a1);
2546 if (time == -1ULL) {
2547 return;
2550 /* CPU-timer units are likewise 2^-12 microseconds: ns = units * 125 / 512 */
2551 time = (time * 125) >> 9;
2553 qemu_mod_timer(env->cpu_timer, qemu_get_clock_ns(vm_clock) + time);
2556 /* Store CPU Timer */
2557 void HELPER(stpt)(uint64_t a1)
2559 /* XXX implement */
2560 stq(a1, 0);
2563 /* Store System Information */
2564 uint32_t HELPER(stsi)(uint64_t a0, uint32_t r0, uint32_t r1)
2566 int cc = 0;
2567 int sel1, sel2;
2569 if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
2570 ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
2571 /* valid function code, invalid reserved bits */
2572 program_interrupt(env, PGM_SPECIFICATION, 2);
2575 sel1 = r0 & STSI_R0_SEL1_MASK;
2576 sel2 = r1 & STSI_R1_SEL2_MASK;
2578 /* XXX: spec exception if sysib is not 4k-aligned */
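/* The function code in r0 selects the level (1 = basic machine, 2 = LPAR,
   3 = VM, or "current"), and sel1/sel2 pick which SYSIB is stored at a0;
   unsupported combinations are reported with cc 3. */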
2580 switch (r0 & STSI_LEVEL_MASK) {
2581 case STSI_LEVEL_1:
2582 if ((sel1 == 1) && (sel2 == 1)) {
2583 /* Basic Machine Configuration */
2584 struct sysib_111 sysib;
2586 memset(&sysib, 0, sizeof(sysib));
2587 ebcdic_put(sysib.manuf, "QEMU ", 16);
2588 /* same as machine type number in STORE CPU ID */
2589 ebcdic_put(sysib.type, "QEMU", 4);
2590 /* same as model number in STORE CPU ID */
2591 ebcdic_put(sysib.model, "QEMU ", 16);
2592 ebcdic_put(sysib.sequence, "QEMU ", 16);
2593 ebcdic_put(sysib.plant, "QEMU", 4);
2594 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2595 } else if ((sel1 == 2) && (sel2 == 1)) {
2596 /* Basic Machine CPU */
2597 struct sysib_121 sysib;
2599 memset(&sysib, 0, sizeof(sysib));
2600 /* XXX make different for different CPUs? */
2601 ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
2602 ebcdic_put(sysib.plant, "QEMU", 4);
2603 stw_p(&sysib.cpu_addr, env->cpu_num);
2604 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2605 } else if ((sel1 == 2) && (sel2 == 2)) {
2606 /* Basic Machine CPUs */
2607 struct sysib_122 sysib;
2609 memset(&sysib, 0, sizeof(sysib));
2610 stl_p(&sysib.capability, 0x443afc29);
2611 /* XXX change when SMP comes */
2612 stw_p(&sysib.total_cpus, 1);
2613 stw_p(&sysib.active_cpus, 1);
2614 stw_p(&sysib.standby_cpus, 0);
2615 stw_p(&sysib.reserved_cpus, 0);
2616 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2617 } else {
2618 cc = 3;
2620 break;
2621 case STSI_LEVEL_2:
2623 if ((sel1 == 2) && (sel2 == 1)) {
2624 /* LPAR CPU */
2625 struct sysib_221 sysib;
2627 memset(&sysib, 0, sizeof(sysib));
2628 /* XXX make different for different CPUs? */
2629 ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
2630 ebcdic_put(sysib.plant, "QEMU", 4);
2631 stw_p(&sysib.cpu_addr, env->cpu_num);
2632 stw_p(&sysib.cpu_id, 0);
2633 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2634 } else if ((sel1 == 2) && (sel2 == 2)) {
2635 /* LPAR CPUs */
2636 struct sysib_222 sysib;
2638 memset(&sysib, 0, sizeof(sysib));
2639 stw_p(&sysib.lpar_num, 0);
2640 sysib.lcpuc = 0;
2641 /* XXX change when SMP comes */
2642 stw_p(&sysib.total_cpus, 1);
2643 stw_p(&sysib.conf_cpus, 1);
2644 stw_p(&sysib.standby_cpus, 0);
2645 stw_p(&sysib.reserved_cpus, 0);
2646 ebcdic_put(sysib.name, "QEMU ", 8);
2647 stl_p(&sysib.caf, 1000);
2648 stw_p(&sysib.dedicated_cpus, 0);
2649 stw_p(&sysib.shared_cpus, 0);
2650 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2651 } else {
2652 cc = 3;
2654 break;
2656 case STSI_LEVEL_3:
2658 if ((sel1 == 2) && (sel2 == 2)) {
2659 /* VM CPUs */
2660 struct sysib_322 sysib;
2662 memset(&sysib, 0, sizeof(sysib));
2663 sysib.count = 1;
2664 /* XXX change when SMP comes */
2665 stw_p(&sysib.vm[0].total_cpus, 1);
2666 stw_p(&sysib.vm[0].conf_cpus, 1);
2667 stw_p(&sysib.vm[0].standby_cpus, 0);
2668 stw_p(&sysib.vm[0].reserved_cpus, 0);
2669 ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
2670 stl_p(&sysib.vm[0].caf, 1000);
2671 ebcdic_put(sysib.vm[0].cpi, "KVM/Linux ", 16);
2672 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2673 } else {
2674 cc = 3;
2676 break;
2678 case STSI_LEVEL_CURRENT:
2679 env->regs[0] = STSI_LEVEL_3;
2680 break;
2681 default:
2682 cc = 3;
2683 break;
2686 return cc;
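/* Load Control (LCTLG/LCTL): load control registers r1 through r3,
   wrapping modulo 16.  Control registers hold address-translation state
   (ASCEs etc.), so the TLB is flushed afterwards. */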
2689 void HELPER(lctlg)(uint32_t r1, uint64_t a2, uint32_t r3)
2691 int i;
2692 uint64_t src = a2;
2694 for (i = r1;; i = (i + 1) % 16) {
2695 env->cregs[i] = ldq(src);
2696 HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
2697 i, src, env->cregs[i]);
2698 src += sizeof(uint64_t);
2700 if (i == r3) {
2701 break;
2705 tlb_flush(env, 1);
2708 void HELPER(lctl)(uint32_t r1, uint64_t a2, uint32_t r3)
2710 int i;
2711 uint64_t src = a2;
2713 for (i = r1;; i = (i + 1) % 16) {
2714 env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | ldl(src);
2715 src += sizeof(uint32_t);
2717 if (i == r3) {
2718 break;
2722 tlb_flush(env, 1);
2725 void HELPER(stctg)(uint32_t r1, uint64_t a2, uint32_t r3)
2727 int i;
2728 uint64_t dest = a2;
2730 for (i = r1;; i = (i + 1) % 16) {
2731 stq(dest, env->cregs[i]);
2732 dest += sizeof(uint64_t);
2734 if (i == r3) {
2735 break;
2740 void HELPER(stctl)(uint32_t r1, uint64_t a2, uint32_t r3)
2742 int i;
2743 uint64_t dest = a2;
2745 for (i = r1;; i = (i + 1) % 16) {
2746 stl(dest, env->cregs[i]);
2747 dest += sizeof(uint32_t);
2749 if (i == r3) {
2750 break;
2755 uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
2757 /* XXX implement */
2759 return 0;
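/* Storage keys are kept in env->storage_keys, one byte per
   TARGET_PAGE_SIZE frame of guest RAM; the helpers below silently ignore
   addresses outside guest memory. */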
2762 /* insert storage key extended */
2763 uint64_t HELPER(iske)(uint64_t r2)
2765 uint64_t addr = get_address(0, 0, r2);
2767 if (addr >= ram_size) {
2768 return 0;
2771 return env->storage_keys[addr / TARGET_PAGE_SIZE];
2774 /* set storage key extended */
2775 void HELPER(sske)(uint32_t r1, uint64_t r2)
2777 uint64_t addr = get_address(0, 0, r2);
2779 if (addr >= ram_size) {
2780 return;
2783 env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
2786 /* reset reference bit extended */
2787 uint32_t HELPER(rrbe)(uint32_t r1, uint64_t r2)
2789 uint8_t re;
2790 uint8_t key;
2791 if (r2 >= ram_size) {
2792 return 0;
2795 key = env->storage_keys[r2 / TARGET_PAGE_SIZE];
2796 re = key & (SK_R | SK_C);
2797 env->storage_keys[r2 / TARGET_PAGE_SIZE] = (key & ~SK_R);
2800 * cc
2802 * 0 Reference bit zero; change bit zero
2803 * 1 Reference bit zero; change bit one
2804 * 2 Reference bit one; change bit zero
2805 * 3 Reference bit one; change bit one
2808 return re >> 1;
2811 /* compare and swap and purge */
2812 uint32_t HELPER(csp)(uint32_t r1, uint32_t r2)
2814 uint32_t cc;
2815 uint32_t o1 = env->regs[r1];
2816 uint64_t a2 = get_address_31fix(r2) & ~3ULL;
2817 uint32_t o2 = ldl(a2);
2819 if (o1 == o2) {
2820 stl(a2, env->regs[(r1 + 1) & 15]);
2821 if (env->regs[r2] & 0x3) {
2822 /* flush TLB / ALB */
2823 tlb_flush(env, 1);
2825 cc = 0;
2826 } else {
2827 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
2828 cc = 1;
2831 return cc;
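/* Helper for MVCS/MVCP: copy up to 256 bytes from a2 (translated with
   mode2) to a1 (translated with mode1), byte by byte and restarting the
   translation at page boundaries; lengths above 256 are capped and
   reported with cc 3. */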
2834 static uint32_t mvc_asc(int64_t l, uint64_t a1, uint64_t mode1, uint64_t a2,
2835 uint64_t mode2)
2837 target_ulong src, dest;
2838 int flags, cc = 0, i;
2840 if (!l) {
2841 return 0;
2842 } else if (l > 256) {
2843 /* max 256 */
2844 l = 256;
2845 cc = 3;
2848 if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
2849 cpu_loop_exit(env);
2851 dest |= a1 & ~TARGET_PAGE_MASK;
2853 if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
2854 cpu_loop_exit(env);
2856 src |= a2 & ~TARGET_PAGE_MASK;
2858 /* XXX replace w/ memcpy */
2859 for (i = 0; i < l; i++) {
2860 /* XXX be more clever */
2861 if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
2862 (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
2863 mvc_asc(l - i, a1 + i, mode1, a2 + i, mode2);
2864 break;
2866 stb_phys(dest + i, ldub_phys(src + i));
2869 return cc;
2872 uint32_t HELPER(mvcs)(uint64_t l, uint64_t a1, uint64_t a2)
2874 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2875 __FUNCTION__, l, a1, a2);
2877 return mvc_asc(l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
2880 uint32_t HELPER(mvcp)(uint64_t l, uint64_t a1, uint64_t a2)
2882 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2883 __FUNCTION__, l, a1, a2);
2885 return mvc_asc(l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
2888 uint32_t HELPER(sigp)(uint64_t order_code, uint32_t r1, uint64_t cpu_addr)
2890 int cc = 0;
2892 HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
2893 __FUNCTION__, order_code, r1, cpu_addr);
2895 /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
2896 as parameter (input). Status (output) is always R1. */
2898 switch (order_code) {
2899 case SIGP_SET_ARCH:
2900 /* switch arch */
2901 break;
2902 case SIGP_SENSE:
2903 /* enumerate CPU status */
2904 if (cpu_addr) {
2905 /* XXX implement when SMP comes */
2906 return 3;
2908 env->regs[r1] &= 0xffffffff00000000ULL;
2909 cc = 1;
2910 break;
2911 #if !defined (CONFIG_USER_ONLY)
2912 case SIGP_RESTART:
2913 qemu_system_reset_request();
2914 cpu_loop_exit(env);
2915 break;
2916 case SIGP_STOP:
2917 qemu_system_shutdown_request();
2918 cpu_loop_exit(env);
2919 break;
2920 #endif
2921 default:
2922 /* unknown sigp */
2923 fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
2924 cc = 3;
2927 return cc;
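/* Set Address Space Control (fast): the operand selects the new ASC mode -
   0x000 primary, 0x100 secondary, 0x300 home.  Access-register mode
   (0x200) is not implemented here and falls through to the default case. */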
2930 void HELPER(sacf)(uint64_t a1)
2932 HELPER_LOG("%s: %16" PRIx64 "\n", __FUNCTION__, a1);
2934 switch (a1 & 0xf00) {
2935 case 0x000:
2936 env->psw.mask &= ~PSW_MASK_ASC;
2937 env->psw.mask |= PSW_ASC_PRIMARY;
2938 break;
2939 case 0x100:
2940 env->psw.mask &= ~PSW_MASK_ASC;
2941 env->psw.mask |= PSW_ASC_SECONDARY;
2942 break;
2943 case 0x300:
2944 env->psw.mask &= ~PSW_MASK_ASC;
2945 env->psw.mask |= PSW_ASC_HOME;
2946 break;
2947 default:
2948 qemu_log("unknown sacf mode: %" PRIx64 "\n", a1);
2949 program_interrupt(env, PGM_SPECIFICATION, 2);
2950 break;
2954 /* invalidate pte */
2955 void HELPER(ipte)(uint64_t pte_addr, uint64_t vaddr)
2957 uint64_t page = vaddr & TARGET_PAGE_MASK;
2958 uint64_t pte = 0;
2960 /* XXX broadcast to other CPUs */
2962 /* XXX Linux is nice enough to give us the exact pte address.
2963 According to spec we'd have to find it out ourselves */
2964 /* XXX Linux is fine with overwriting the pte, the spec requires
2965 us to only set the invalid bit */
2966 stq_phys(pte_addr, pte | _PAGE_INVALID);
2968 /* XXX we exploit the fact that Linux passes the exact virtual
2969 address here - it's not obliged to! */
2970 tlb_flush_page(env, page);
2972 /* XXX 31-bit hack */
2973 if (page & 0x80000000) {
2974 tlb_flush_page(env, page & ~0x80000000);
2975 } else {
2976 tlb_flush_page(env, page | 0x80000000);
2980 /* flush local tlb */
2981 void HELPER(ptlb)(void)
2983 tlb_flush(env, 1);
2986 /* store using real address */
2987 void HELPER(stura)(uint64_t addr, uint32_t v1)
2989 stl_phys(get_address(0, 0, addr), v1); /* STURA stores a full 32-bit word */
2992 /* load real address */
2993 uint32_t HELPER(lra)(uint64_t addr, uint32_t r1)
2995 uint32_t cc = 0;
2996 int old_exc = env->exception_index;
2997 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
2998 uint64_t ret;
2999 int flags;
3001 /* XXX incomplete - has more corner cases */
3002 if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
3003 program_interrupt(env, PGM_SPECIAL_OP, 2);
3006 env->exception_index = old_exc;
3007 if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
3008 cc = 3;
3010 if (env->exception_index == EXCP_PGM) {
3011 ret = env->int_pgm_code | 0x80000000;
3012 } else {
3013 ret |= addr & ~TARGET_PAGE_MASK;
3015 env->exception_index = old_exc;
3017 if (!(env->psw.mask & PSW_MASK_64)) {
3018 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (ret & 0xffffffffULL);
3019 } else {
3020 env->regs[r1] = ret;
3023 return cc;
3026 #endif