target-s390x/op_helper.c
1 /*
2 * S/390 helper routines
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "cpu.h"
22 #include "dyngen-exec.h"
23 #include "host-utils.h"
24 #include "helper.h"
25 #include <string.h>
26 #include "kvm.h"
27 #include "qemu-timer.h"
28 #ifdef CONFIG_KVM
29 #include <linux/kvm.h>
30 #endif
32 #if !defined (CONFIG_USER_ONLY)
33 #include "sysemu.h"
34 #endif
36 /*****************************************************************************/
37 /* Softmmu support */
38 #if !defined (CONFIG_USER_ONLY)
39 #include "softmmu_exec.h"
41 #define MMUSUFFIX _mmu
43 #define SHIFT 0
44 #include "softmmu_template.h"
46 #define SHIFT 1
47 #include "softmmu_template.h"
49 #define SHIFT 2
50 #include "softmmu_template.h"
52 #define SHIFT 3
53 #include "softmmu_template.h"
55 /* try to fill the TLB and raise an exception on error. If retaddr is
56 NULL, it means that the function was called from C code (i.e. not
57 from generated code or from helper.c) */
58 /* XXX: fix it to restore all registers */
59 void tlb_fill(CPUS390XState *env1, target_ulong addr, int is_write, int mmu_idx,
60 uintptr_t retaddr)
62 TranslationBlock *tb;
63 CPUS390XState *saved_env;
64 int ret;
66 saved_env = env;
67 env = env1;
68 ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx);
69 if (unlikely(ret != 0)) {
70 if (likely(retaddr)) {
71 /* now we have a real cpu fault */
72 tb = tb_find_pc(retaddr);
73 if (likely(tb)) {
74 /* the PC is inside the translated code. It means that we have
75 a virtual CPU fault */
76 cpu_restore_state(tb, env, retaddr);
79 cpu_loop_exit(env);
81 env = saved_env;
84 #endif
86 /* #define DEBUG_HELPER */
87 #ifdef DEBUG_HELPER
88 #define HELPER_LOG(x...) qemu_log(x)
89 #else
90 #define HELPER_LOG(x...)
91 #endif
93 /* raise an exception */
94 void HELPER(exception)(uint32_t excp)
96 HELPER_LOG("%s: exception %d\n", __FUNCTION__, excp);
97 env->exception_index = excp;
98 cpu_loop_exit(env);
101 #ifndef CONFIG_USER_ONLY
102 static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
103 uint8_t byte)
105 target_phys_addr_t dest_phys;
106 target_phys_addr_t len = l;
107 void *dest_p;
108 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
109 int flags;
111 if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
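/* translation failed: the byte store below goes through the softmmu slow
   path and raises the access exception, so the cpu_abort() is unreachable */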
112 stb(dest, byte);
113 cpu_abort(env, "should never reach here");
115 dest_phys |= dest & ~TARGET_PAGE_MASK;
117 dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
119 memset(dest_p, byte, len);
121 cpu_physical_memory_unmap(dest_p, 1, len, len);
124 static void mvc_fast_memmove(CPUS390XState *env, uint32_t l, uint64_t dest,
125 uint64_t src)
127 target_phys_addr_t dest_phys;
128 target_phys_addr_t src_phys;
129 target_phys_addr_t len = l;
130 void *dest_p;
131 void *src_p;
132 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
133 int flags;
135 if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
136 stb(dest, 0);
137 cpu_abort(env, "should never reach here");
139 dest_phys |= dest & ~TARGET_PAGE_MASK;
141 if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
142 ldub(src);
143 cpu_abort(env, "should never reach here");
145 src_phys |= src & ~TARGET_PAGE_MASK;
147 dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
148 src_p = cpu_physical_memory_map(src_phys, &len, 0);
150 memmove(dest_p, src_p, len);
152 cpu_physical_memory_unmap(dest_p, 1, len, len);
153 cpu_physical_memory_unmap(src_p, 0, len, len);
155 #endif
157 /* and on array */
158 uint32_t HELPER(nc)(uint32_t l, uint64_t dest, uint64_t src)
160 int i;
161 unsigned char x;
162 uint32_t cc = 0;
164 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
165 __FUNCTION__, l, dest, src);
166 for (i = 0; i <= l; i++) {
167 x = ldub(dest + i) & ldub(src + i);
168 if (x) {
169 cc = 1;
171 stb(dest + i, x);
173 return cc;
176 /* xor on array */
177 uint32_t HELPER(xc)(uint32_t l, uint64_t dest, uint64_t src)
179 int i;
180 unsigned char x;
181 uint32_t cc = 0;
183 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
184 __FUNCTION__, l, dest, src);
186 #ifndef CONFIG_USER_ONLY
187 /* xor with itself is the same as memset(0) */
188 if ((l > 32) && (src == dest) &&
189 (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
190 mvc_fast_memset(env, l + 1, dest, 0);
191 return 0;
193 #else
194 if (src == dest) {
195 memset(g2h(dest), 0, l + 1);
196 return 0;
198 #endif
200 for (i = 0; i <= l; i++) {
201 x = ldub(dest + i) ^ ldub(src + i);
202 if (x) {
203 cc = 1;
205 stb(dest + i, x);
207 return cc;
210 /* or on array */
211 uint32_t HELPER(oc)(uint32_t l, uint64_t dest, uint64_t src)
213 int i;
214 unsigned char x;
215 uint32_t cc = 0;
217 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
218 __FUNCTION__, l, dest, src);
219 for (i = 0; i <= l; i++) {
220 x = ldub(dest + i) | ldub(src + i);
221 if (x) {
222 cc = 1;
224 stb(dest + i, x);
226 return cc;
229 /* memmove */
230 void HELPER(mvc)(uint32_t l, uint64_t dest, uint64_t src)
232 int i = 0;
233 int x = 0;
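/* the MVC length field encodes l + 1 bytes; l_64 is the number of whole
   8-byte chunks that can be copied with 64-bit loads/stores */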
234 uint32_t l_64 = (l + 1) / 8;
236 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
237 __FUNCTION__, l, dest, src);
239 #ifndef CONFIG_USER_ONLY
240 if ((l > 32) &&
241 (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
242 (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
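/* dest == src + 1 is the architected overlap idiom for memset: every byte
   stored becomes the source of the byte that follows it */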
243 if (dest == (src + 1)) {
244 mvc_fast_memset(env, l + 1, dest, ldub(src));
245 return;
246 } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
247 mvc_fast_memmove(env, l + 1, dest, src);
248 return;
251 #else
252 if (dest == (src + 1)) {
253 memset(g2h(dest), ldub(src), l + 1);
254 return;
255 } else {
256 memmove(g2h(dest), g2h(src), l + 1);
257 return;
259 #endif
261 /* handle the parts that fit into 8-byte loads/stores */
262 if (dest != (src + 1)) {
263 for (i = 0; i < l_64; i++) {
264 stq(dest + x, ldq(src + x));
265 x += 8;
269 /* slow version crossing pages with byte accesses */
270 for (i = x; i <= l; i++) {
271 stb(dest + i, ldub(src + i));
275 /* compare unsigned byte arrays */
276 uint32_t HELPER(clc)(uint32_t l, uint64_t s1, uint64_t s2)
278 int i;
279 unsigned char x, y;
280 uint32_t cc;
281 HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
282 __FUNCTION__, l, s1, s2);
283 for (i = 0; i <= l; i++) {
284 x = ldub(s1 + i);
285 y = ldub(s2 + i);
286 HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
287 if (x < y) {
288 cc = 1;
289 goto done;
290 } else if (x > y) {
291 cc = 2;
292 goto done;
295 cc = 0;
296 done:
297 HELPER_LOG("\n");
298 return cc;
301 /* compare logical under mask */
302 uint32_t HELPER(clm)(uint32_t r1, uint32_t mask, uint64_t addr)
304 uint8_t r,d;
305 uint32_t cc;
306 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __FUNCTION__, r1,
307 mask, addr);
308 cc = 0;
309 while (mask) {
310 if (mask & 8) {
311 d = ldub(addr);
312 r = (r1 & 0xff000000UL) >> 24;
313 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
314 addr);
315 if (r < d) {
316 cc = 1;
317 break;
318 } else if (r > d) {
319 cc = 2;
320 break;
322 addr++;
324 mask = (mask << 1) & 0xf;
325 r1 <<= 8;
327 HELPER_LOG("\n");
328 return cc;
331 /* store character under mask */
332 void HELPER(stcm)(uint32_t r1, uint32_t mask, uint64_t addr)
334 uint8_t r;
335 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__, r1, mask,
336 addr);
337 while (mask) {
338 if (mask & 8) {
339 r = (r1 & 0xff000000UL) >> 24;
340 stb(addr, r);
341 HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask, r, addr);
342 addr++;
344 mask = (mask << 1) & 0xf;
345 r1 <<= 8;
347 HELPER_LOG("\n");
350 /* 64/64 -> 128 unsigned multiplication */
351 void HELPER(mlg)(uint32_t r1, uint64_t v2)
353 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
354 /* assuming 64-bit hosts have __uint128_t */
355 __uint128_t res = (__uint128_t)env->regs[r1 + 1];
356 res *= (__uint128_t)v2;
357 env->regs[r1] = (uint64_t)(res >> 64);
358 env->regs[r1 + 1] = (uint64_t)res;
359 #else
360 mulu64(&env->regs[r1 + 1], &env->regs[r1], env->regs[r1 + 1], v2);
361 #endif
364 /* 128 -> 64/64 unsigned division */
365 void HELPER(dlg)(uint32_t r1, uint64_t v2)
367 uint64_t divisor = v2;
369 if (!env->regs[r1]) {
370 /* 64 -> 64/64 case */
371 env->regs[r1] = env->regs[r1+1] % divisor;
372 env->regs[r1+1] = env->regs[r1+1] / divisor;
373 return;
374 } else {
376 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
377 /* assuming 64-bit hosts have __uint128_t */
378 __uint128_t dividend = (((__uint128_t)env->regs[r1]) << 64) |
379 (env->regs[r1+1]);
380 __uint128_t quotient = dividend / divisor;
381 env->regs[r1+1] = quotient;
382 __uint128_t remainder = dividend % divisor;
383 env->regs[r1] = remainder;
384 #else
385 /* 32-bit hosts would need special wrapper functionality - just abort if
386 we encounter such a case; it's very unlikely anyway. */
387 cpu_abort(env, "128 -> 64/64 division not implemented\n");
388 #endif
392 static inline uint64_t get_address(int x2, int b2, int d2)
394 uint64_t r = d2;
396 if (x2) {
397 r += env->regs[x2];
400 if (b2) {
401 r += env->regs[b2];
404 /* 31-Bit mode */
405 if (!(env->psw.mask & PSW_MASK_64)) {
406 r &= 0x7fffffff;
409 return r;
412 static inline uint64_t get_address_31fix(int reg)
414 uint64_t r = env->regs[reg];
416 /* 31-Bit mode */
417 if (!(env->psw.mask & PSW_MASK_64)) {
418 r &= 0x7fffffff;
421 return r;
424 /* search string (c is the byte to search for, r2 points to the string, r1 to its end) */
425 uint32_t HELPER(srst)(uint32_t c, uint32_t r1, uint32_t r2)
427 uint64_t i;
428 uint32_t cc = 2;
429 uint64_t str = get_address_31fix(r2);
430 uint64_t end = get_address_31fix(r1);
432 HELPER_LOG("%s: c %d *r1 0x%" PRIx64 " *r2 0x%" PRIx64 "\n", __FUNCTION__,
433 c, env->regs[r1], env->regs[r2]);
435 for (i = str; i != end; i++) {
436 if (ldub(i) == c) {
437 env->regs[r1] = i;
438 cc = 1;
439 break;
443 return cc;
446 /* unsigned string compare (c is string terminator) */
447 uint32_t HELPER(clst)(uint32_t c, uint32_t r1, uint32_t r2)
449 uint64_t s1 = get_address_31fix(r1);
450 uint64_t s2 = get_address_31fix(r2);
451 uint8_t v1, v2;
452 uint32_t cc;
453 c = c & 0xff;
454 #ifdef CONFIG_USER_ONLY
455 if (!c) {
456 HELPER_LOG("%s: comparing '%s' and '%s'\n",
457 __FUNCTION__, (char*)g2h(s1), (char*)g2h(s2));
459 #endif
460 for (;;) {
461 v1 = ldub(s1);
462 v2 = ldub(s2);
463 if ((v1 == c || v2 == c) || (v1 != v2)) {
464 break;
466 s1++;
467 s2++;
470 if (v1 == v2) {
471 cc = 0;
472 } else {
473 cc = (v1 < v2) ? 1 : 2;
474 /* FIXME: 31-bit mode! */
475 env->regs[r1] = s1;
476 env->regs[r2] = s2;
478 return cc;
481 /* move page */
482 void HELPER(mvpg)(uint64_t r0, uint64_t r1, uint64_t r2)
484 /* XXX missing r0 handling */
485 #ifdef CONFIG_USER_ONLY
486 int i;
488 for (i = 0; i < TARGET_PAGE_SIZE; i++) {
489 stb(r1 + i, ldub(r2 + i));
491 #else
492 mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
493 #endif
496 /* string copy (c is string terminator) */
497 void HELPER(mvst)(uint32_t c, uint32_t r1, uint32_t r2)
499 uint64_t dest = get_address_31fix(r1);
500 uint64_t src = get_address_31fix(r2);
501 uint8_t v;
502 c = c & 0xff;
503 #ifdef CONFIG_USER_ONLY
504 if (!c) {
505 HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__, (char*)g2h(src),
506 dest);
508 #endif
509 for (;;) {
510 v = ldub(src);
511 stb(dest, v);
512 if (v == c) {
513 break;
515 src++;
516 dest++;
518 env->regs[r1] = dest; /* FIXME: 31-bit mode! */
521 /* compare and swap 64-bit */
522 uint32_t HELPER(csg)(uint32_t r1, uint64_t a2, uint32_t r3)
524 /* FIXME: locking? */
525 uint32_t cc;
526 uint64_t v2 = ldq(a2);
527 if (env->regs[r1] == v2) {
528 cc = 0;
529 stq(a2, env->regs[r3]);
530 } else {
531 cc = 1;
532 env->regs[r1] = v2;
534 return cc;
537 /* compare double and swap 64-bit */
538 uint32_t HELPER(cdsg)(uint32_t r1, uint64_t a2, uint32_t r3)
540 /* FIXME: locking? */
541 uint32_t cc;
542 uint64_t v2_hi = ldq(a2);
543 uint64_t v2_lo = ldq(a2 + 8);
544 uint64_t v1_hi = env->regs[r1];
545 uint64_t v1_lo = env->regs[r1 + 1];
547 if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) {
548 cc = 0;
549 stq(a2, env->regs[r3]);
550 stq(a2 + 8, env->regs[r3 + 1]);
551 } else {
552 cc = 1;
553 env->regs[r1] = v2_hi;
554 env->regs[r1 + 1] = v2_lo;
557 return cc;
560 /* compare and swap 32-bit */
561 uint32_t HELPER(cs)(uint32_t r1, uint64_t a2, uint32_t r3)
563 /* FIXME: locking? */
564 uint32_t cc;
565 HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__, r1, a2, r3);
566 uint32_t v2 = ldl(a2);
567 if (((uint32_t)env->regs[r1]) == v2) {
568 cc = 0;
569 stl(a2, (uint32_t)env->regs[r3]);
570 } else {
571 cc = 1;
572 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | v2;
574 return cc;
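/* insert character under mask: load the bytes selected by the 4-bit mask
   into the low word of r1. cc is 0 if all inserted bits are zero (or the
   mask is empty), 1 if the leftmost inserted bit is one, 2 otherwise */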
577 static uint32_t helper_icm(uint32_t r1, uint64_t address, uint32_t mask)
579 int pos = 24; /* top of the lower half of r1 */
580 uint64_t rmask = 0xff000000ULL;
581 uint8_t val = 0;
582 int ccd = 0;
583 uint32_t cc = 0;
585 while (mask) {
586 if (mask & 8) {
587 env->regs[r1] &= ~rmask;
588 val = ldub(address);
589 if ((val & 0x80) && !ccd) {
590 cc = 1;
592 ccd = 1;
593 if (val && cc == 0) {
594 cc = 2;
596 env->regs[r1] |= (uint64_t)val << pos;
597 address++;
599 mask = (mask << 1) & 0xf;
600 pos -= 8;
601 rmask >>= 8;
604 return cc;
607 /* execute instruction
608 this instruction executes an insn modified by the contents of r1;
609 it does not change the executed instruction in memory and
610 it does not change the program counter;
611 in other words: tricky...
612 currently implemented by interpreting the cases it is most commonly used in */
614 uint32_t HELPER(ex)(uint32_t cc, uint64_t v1, uint64_t addr, uint64_t ret)
616 uint16_t insn = lduw_code(addr);
617 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__, v1, addr,
618 insn);
619 if ((insn & 0xf0ff) == 0xd000) {
620 uint32_t l, insn2, b1, b2, d1, d2;
621 l = v1 & 0xff;
622 insn2 = ldl_code(addr + 2);
623 b1 = (insn2 >> 28) & 0xf;
624 b2 = (insn2 >> 12) & 0xf;
625 d1 = (insn2 >> 16) & 0xfff;
626 d2 = insn2 & 0xfff;
627 switch (insn & 0xf00) {
628 case 0x200:
629 helper_mvc(l, get_address(0, b1, d1), get_address(0, b2, d2));
630 break;
631 case 0x500:
632 cc = helper_clc(l, get_address(0, b1, d1), get_address(0, b2, d2));
633 break;
634 case 0x700:
635 cc = helper_xc(l, get_address(0, b1, d1), get_address(0, b2, d2));
636 break;
637 case 0xc00:
638 helper_tr(l, get_address(0, b1, d1), get_address(0, b2, d2));
639 break;
640 default:
641 goto abort;
642 break;
644 } else if ((insn & 0xff00) == 0x0a00) {
645 /* supervisor call */
646 HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__, (insn|v1) & 0xff);
647 env->psw.addr = ret - 4;
648 env->int_svc_code = (insn|v1) & 0xff;
649 env->int_svc_ilc = 4;
650 helper_exception(EXCP_SVC);
651 } else if ((insn & 0xff00) == 0xbf00) {
652 uint32_t insn2, r1, r3, b2, d2;
653 insn2 = ldl_code(addr + 2);
654 r1 = (insn2 >> 20) & 0xf;
655 r3 = (insn2 >> 16) & 0xf;
656 b2 = (insn2 >> 12) & 0xf;
657 d2 = insn2 & 0xfff;
658 cc = helper_icm(r1, get_address(0, b2, d2), r3);
659 } else {
660 abort:
661 cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
662 insn);
664 return cc;
667 /* absolute value 32-bit */
668 uint32_t HELPER(abs_i32)(int32_t val)
670 if (val < 0) {
671 return -val;
672 } else {
673 return val;
677 /* negative absolute value 32-bit */
678 int32_t HELPER(nabs_i32)(int32_t val)
680 if (val < 0) {
681 return val;
682 } else {
683 return -val;
687 /* absolute value 64-bit */
688 uint64_t HELPER(abs_i64)(int64_t val)
690 HELPER_LOG("%s: val 0x%" PRIx64 "\n", __FUNCTION__, val);
692 if (val < 0) {
693 return -val;
694 } else {
695 return val;
699 /* negative absolute value 64-bit */
700 int64_t HELPER(nabs_i64)(int64_t val)
702 if (val < 0) {
703 return val;
704 } else {
705 return -val;
709 /* add with carry 32-bit unsigned */
710 uint32_t HELPER(addc_u32)(uint32_t cc, uint32_t v1, uint32_t v2)
712 uint32_t res;
714 res = v1 + v2;
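/* cc bit 1 holds the carry out of the previous addition */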
715 if (cc & 2) {
716 res++;
719 return res;
722 /* store character under mask high; operates on the upper half of r1 */
723 void HELPER(stcmh)(uint32_t r1, uint64_t address, uint32_t mask)
725 int pos = 56; /* top of the upper half of r1 */
727 while (mask) {
728 if (mask & 8) {
729 stb(address, (env->regs[r1] >> pos) & 0xff);
730 address++;
732 mask = (mask << 1) & 0xf;
733 pos -= 8;
737 /* insert character under mask high; same as icm, but operates on the
738 upper half of r1 */
739 uint32_t HELPER(icmh)(uint32_t r1, uint64_t address, uint32_t mask)
741 int pos = 56; /* top of the upper half of r1 */
742 uint64_t rmask = 0xff00000000000000ULL;
743 uint8_t val = 0;
744 int ccd = 0;
745 uint32_t cc = 0;
747 while (mask) {
748 if (mask & 8) {
749 env->regs[r1] &= ~rmask;
750 val = ldub(address);
751 if ((val & 0x80) && !ccd) {
752 cc = 1;
754 ccd = 1;
755 if (val && cc == 0) {
756 cc = 2;
758 env->regs[r1] |= (uint64_t)val << pos;
759 address++;
761 mask = (mask << 1) & 0xf;
762 pos -= 8;
763 rmask >>= 8;
766 return cc;
769 /* insert psw mask and condition code into r1 */
770 void HELPER(ipm)(uint32_t cc, uint32_t r1)
772 uint64_t r = env->regs[r1];
774 r &= 0xffffffff00ffffffULL;
775 r |= (cc << 28) | ( (env->psw.mask >> 40) & 0xf );
776 env->regs[r1] = r;
777 HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__,
778 cc, env->psw.mask, r);
781 /* load access registers r1 to r3 from memory at a2 */
782 void HELPER(lam)(uint32_t r1, uint64_t a2, uint32_t r3)
784 int i;
786 for (i = r1;; i = (i + 1) % 16) {
787 env->aregs[i] = ldl(a2);
788 a2 += 4;
790 if (i == r3) {
791 break;
796 /* store access registers r1 to r3 in memory at a2 */
797 void HELPER(stam)(uint32_t r1, uint64_t a2, uint32_t r3)
799 int i;
801 for (i = r1;; i = (i + 1) % 16) {
802 stl(a2, env->aregs[i]);
803 a2 += 4;
805 if (i == r3) {
806 break;
811 /* move long */
812 uint32_t HELPER(mvcl)(uint32_t r1, uint32_t r2)
814 uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
815 uint64_t dest = get_address_31fix(r1);
816 uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
817 uint64_t src = get_address_31fix(r2);
818 uint8_t pad = src >> 24;
819 uint8_t v;
820 uint32_t cc;
822 if (destlen == srclen) {
823 cc = 0;
824 } else if (destlen < srclen) {
825 cc = 1;
826 } else {
827 cc = 2;
830 if (srclen > destlen) {
831 srclen = destlen;
834 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
835 v = ldub(src);
836 stb(dest, v);
839 for (; destlen; dest++, destlen--) {
840 stb(dest, pad);
843 env->regs[r1 + 1] = destlen;
844 /* can't use srclen here, we trunc'ed it */
845 env->regs[r2 + 1] -= src - env->regs[r2];
846 env->regs[r1] = dest;
847 env->regs[r2] = src;
849 return cc;
852 /* move long extended: another memcpy insn with more bells and whistles */
853 uint32_t HELPER(mvcle)(uint32_t r1, uint64_t a2, uint32_t r3)
855 uint64_t destlen = env->regs[r1 + 1];
856 uint64_t dest = env->regs[r1];
857 uint64_t srclen = env->regs[r3 + 1];
858 uint64_t src = env->regs[r3];
859 uint8_t pad = a2 & 0xff;
860 uint8_t v;
861 uint32_t cc;
863 if (!(env->psw.mask & PSW_MASK_64)) {
864 destlen = (uint32_t)destlen;
865 srclen = (uint32_t)srclen;
866 dest &= 0x7fffffff;
867 src &= 0x7fffffff;
870 if (destlen == srclen) {
871 cc = 0;
872 } else if (destlen < srclen) {
873 cc = 1;
874 } else {
875 cc = 2;
878 if (srclen > destlen) {
879 srclen = destlen;
882 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
883 v = ldub(src);
884 stb(dest, v);
887 for (; destlen; dest++, destlen--) {
888 stb(dest, pad);
891 env->regs[r1 + 1] = destlen;
892 /* can't use srclen here, we trunc'ed it */
893 /* FIXME: 31-bit mode! */
894 env->regs[r3 + 1] -= src - env->regs[r3];
895 env->regs[r1] = dest;
896 env->regs[r3] = src;
898 return cc;
901 /* compare logical long extended: memcmp insn with padding */
902 uint32_t HELPER(clcle)(uint32_t r1, uint64_t a2, uint32_t r3)
904 uint64_t destlen = env->regs[r1 + 1];
905 uint64_t dest = get_address_31fix(r1);
906 uint64_t srclen = env->regs[r3 + 1];
907 uint64_t src = get_address_31fix(r3);
908 uint8_t pad = a2 & 0xff;
909 uint8_t v1 = 0, v2 = 0;
910 uint32_t cc = 0;
912 if (!(destlen || srclen)) {
913 return cc;
916 if (srclen > destlen) {
917 srclen = destlen;
920 for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
921 v1 = srclen ? ldub(src) : pad;
922 v2 = destlen ? ldub(dest) : pad;
923 if (v1 != v2) {
924 cc = (v1 < v2) ? 1 : 2;
925 break;
929 env->regs[r1 + 1] = destlen;
930 /* can't use srclen here, we trunc'ed it */
931 env->regs[r3 + 1] -= src - env->regs[r3];
932 env->regs[r1] = dest;
933 env->regs[r3] = src;
935 return cc;
938 /* subtract unsigned v2 from v1 with borrow */
939 uint32_t HELPER(slb)(uint32_t cc, uint32_t r1, uint32_t v2)
941 uint32_t v1 = env->regs[r1];
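/* subtract with borrow: add the one's complement of v2 plus the incoming
   carry, which is kept in bit 1 of the condition code */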
942 uint32_t res = v1 + (~v2) + (cc >> 1);
944 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | res;
945 if (cc & 2) {
946 /* borrow */
947 return v1 ? 1 : 0;
948 } else {
949 return v1 ? 3 : 2;
953 /* subtract unsigned v2 from v1 with borrow */
954 uint32_t HELPER(slbg)(uint32_t cc, uint32_t r1, uint64_t v1, uint64_t v2)
956 uint64_t res = v1 + (~v2) + (cc >> 1);
958 env->regs[r1] = res;
959 if (cc & 2) {
960 /* borrow */
961 return v1 ? 1 : 0;
962 } else {
963 return v1 ? 3 : 2;
967 static inline int float_comp_to_cc(int float_compare)
969 switch (float_compare) {
970 case float_relation_equal:
971 return 0;
972 case float_relation_less:
973 return 1;
974 case float_relation_greater:
975 return 2;
976 case float_relation_unordered:
977 return 3;
978 default:
979 cpu_abort(env, "unknown return value for float compare\n");
983 /* condition codes for binary FP ops */
984 static uint32_t set_cc_f32(float32 v1, float32 v2)
986 return float_comp_to_cc(float32_compare_quiet(v1, v2, &env->fpu_status));
989 static uint32_t set_cc_f64(float64 v1, float64 v2)
991 return float_comp_to_cc(float64_compare_quiet(v1, v2, &env->fpu_status));
994 /* condition codes for unary FP ops */
995 static uint32_t set_cc_nz_f32(float32 v)
997 if (float32_is_any_nan(v)) {
998 return 3;
999 } else if (float32_is_zero(v)) {
1000 return 0;
1001 } else if (float32_is_neg(v)) {
1002 return 1;
1003 } else {
1004 return 2;
1008 static uint32_t set_cc_nz_f64(float64 v)
1010 if (float64_is_any_nan(v)) {
1011 return 3;
1012 } else if (float64_is_zero(v)) {
1013 return 0;
1014 } else if (float64_is_neg(v)) {
1015 return 1;
1016 } else {
1017 return 2;
1021 static uint32_t set_cc_nz_f128(float128 v)
1023 if (float128_is_any_nan(v)) {
1024 return 3;
1025 } else if (float128_is_zero(v)) {
1026 return 0;
1027 } else if (float128_is_neg(v)) {
1028 return 1;
1029 } else {
1030 return 2;
1034 /* convert 32-bit int to 64-bit float */
1035 void HELPER(cdfbr)(uint32_t f1, int32_t v2)
1037 HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__, v2, f1);
1038 env->fregs[f1].d = int32_to_float64(v2, &env->fpu_status);
1041 /* convert 32-bit int to 128-bit float */
1042 void HELPER(cxfbr)(uint32_t f1, int32_t v2)
1044 CPU_QuadU v1;
1045 v1.q = int32_to_float128(v2, &env->fpu_status);
1046 env->fregs[f1].ll = v1.ll.upper;
1047 env->fregs[f1 + 2].ll = v1.ll.lower;
1050 /* convert 64-bit int to 32-bit float */
1051 void HELPER(cegbr)(uint32_t f1, int64_t v2)
1053 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
1054 env->fregs[f1].l.upper = int64_to_float32(v2, &env->fpu_status);
1057 /* convert 64-bit int to 64-bit float */
1058 void HELPER(cdgbr)(uint32_t f1, int64_t v2)
1060 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
1061 env->fregs[f1].d = int64_to_float64(v2, &env->fpu_status);
1064 /* convert 64-bit int to 128-bit float */
1065 void HELPER(cxgbr)(uint32_t f1, int64_t v2)
1067 CPU_QuadU x1;
1068 x1.q = int64_to_float128(v2, &env->fpu_status);
1069 HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__, v2,
1070 x1.ll.upper, x1.ll.lower);
1071 env->fregs[f1].ll = x1.ll.upper;
1072 env->fregs[f1 + 2].ll = x1.ll.lower;
1075 /* convert 32-bit int to 32-bit float */
1076 void HELPER(cefbr)(uint32_t f1, int32_t v2)
1078 env->fregs[f1].l.upper = int32_to_float32(v2, &env->fpu_status);
1079 HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__, v2,
1080 env->fregs[f1].l.upper, f1);
1083 /* 32-bit FP addition RR */
1084 uint32_t HELPER(aebr)(uint32_t f1, uint32_t f2)
1086 env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
1087 env->fregs[f2].l.upper,
1088 &env->fpu_status);
1089 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
1090 env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
1092 return set_cc_nz_f32(env->fregs[f1].l.upper);
1095 /* 64-bit FP addition RR */
1096 uint32_t HELPER(adbr)(uint32_t f1, uint32_t f2)
1098 env->fregs[f1].d = float64_add(env->fregs[f1].d, env->fregs[f2].d,
1099 &env->fpu_status);
1100 HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__,
1101 env->fregs[f2].d, env->fregs[f1].d, f1);
1103 return set_cc_nz_f64(env->fregs[f1].d);
1106 /* 32-bit FP subtraction RR */
1107 uint32_t HELPER(sebr)(uint32_t f1, uint32_t f2)
1109 env->fregs[f1].l.upper = float32_sub(env->fregs[f1].l.upper,
1110 env->fregs[f2].l.upper,
1111 &env->fpu_status);
1112 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
1113 env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
1115 return set_cc_nz_f32(env->fregs[f1].l.upper);
1118 /* 64-bit FP subtraction RR */
1119 uint32_t HELPER(sdbr)(uint32_t f1, uint32_t f2)
1121 env->fregs[f1].d = float64_sub(env->fregs[f1].d, env->fregs[f2].d,
1122 &env->fpu_status);
1123 HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
1124 __FUNCTION__, env->fregs[f2].d, env->fregs[f1].d, f1);
1126 return set_cc_nz_f64(env->fregs[f1].d);
1129 /* 32-bit FP division RR */
1130 void HELPER(debr)(uint32_t f1, uint32_t f2)
1132 env->fregs[f1].l.upper = float32_div(env->fregs[f1].l.upper,
1133 env->fregs[f2].l.upper,
1134 &env->fpu_status);
1137 /* 128-bit FP division RR */
1138 void HELPER(dxbr)(uint32_t f1, uint32_t f2)
1140 CPU_QuadU v1;
1141 v1.ll.upper = env->fregs[f1].ll;
1142 v1.ll.lower = env->fregs[f1 + 2].ll;
1143 CPU_QuadU v2;
1144 v2.ll.upper = env->fregs[f2].ll;
1145 v2.ll.lower = env->fregs[f2 + 2].ll;
1146 CPU_QuadU res;
1147 res.q = float128_div(v1.q, v2.q, &env->fpu_status);
1148 env->fregs[f1].ll = res.ll.upper;
1149 env->fregs[f1 + 2].ll = res.ll.lower;
1152 /* 64-bit FP multiplication RR */
1153 void HELPER(mdbr)(uint32_t f1, uint32_t f2)
1155 env->fregs[f1].d = float64_mul(env->fregs[f1].d, env->fregs[f2].d,
1156 &env->fpu_status);
1159 /* 128-bit FP multiplication RR */
1160 void HELPER(mxbr)(uint32_t f1, uint32_t f2)
1162 CPU_QuadU v1;
1163 v1.ll.upper = env->fregs[f1].ll;
1164 v1.ll.lower = env->fregs[f1 + 2].ll;
1165 CPU_QuadU v2;
1166 v2.ll.upper = env->fregs[f2].ll;
1167 v2.ll.lower = env->fregs[f2 + 2].ll;
1168 CPU_QuadU res;
1169 res.q = float128_mul(v1.q, v2.q, &env->fpu_status);
1170 env->fregs[f1].ll = res.ll.upper;
1171 env->fregs[f1 + 2].ll = res.ll.lower;
1174 /* convert 32-bit float to 64-bit float */
1175 void HELPER(ldebr)(uint32_t r1, uint32_t r2)
1177 env->fregs[r1].d = float32_to_float64(env->fregs[r2].l.upper,
1178 &env->fpu_status);
1181 /* convert 128-bit float to 64-bit float */
1182 void HELPER(ldxbr)(uint32_t f1, uint32_t f2)
1184 CPU_QuadU x2;
1185 x2.ll.upper = env->fregs[f2].ll;
1186 x2.ll.lower = env->fregs[f2 + 2].ll;
1187 env->fregs[f1].d = float128_to_float64(x2.q, &env->fpu_status);
1188 HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__, env->fregs[f1].d);
1191 /* convert 64-bit float to 128-bit float */
1192 void HELPER(lxdbr)(uint32_t f1, uint32_t f2)
1194 CPU_QuadU res;
1195 res.q = float64_to_float128(env->fregs[f2].d, &env->fpu_status);
1196 env->fregs[f1].ll = res.ll.upper;
1197 env->fregs[f1 + 2].ll = res.ll.lower;
1200 /* convert 64-bit float to 32-bit float */
1201 void HELPER(ledbr)(uint32_t f1, uint32_t f2)
1203 float64 d2 = env->fregs[f2].d;
1204 env->fregs[f1].l.upper = float64_to_float32(d2, &env->fpu_status);
1207 /* convert 128-bit float to 32-bit float */
1208 void HELPER(lexbr)(uint32_t f1, uint32_t f2)
1210 CPU_QuadU x2;
1211 x2.ll.upper = env->fregs[f2].ll;
1212 x2.ll.lower = env->fregs[f2 + 2].ll;
1213 env->fregs[f1].l.upper = float128_to_float32(x2.q, &env->fpu_status);
1214 HELPER_LOG("%s: to 0x%d\n", __FUNCTION__, env->fregs[f1].l.upper);
1217 /* absolute value of 32-bit float */
1218 uint32_t HELPER(lpebr)(uint32_t f1, uint32_t f2)
1220 float32 v1;
1221 float32 v2 = env->fregs[f2].l.upper; /* short FP values live in the high word */
1222 v1 = float32_abs(v2);
1223 env->fregs[f1].l.upper = v1;
1224 return set_cc_nz_f32(v1);
1227 /* absolute value of 64-bit float */
1228 uint32_t HELPER(lpdbr)(uint32_t f1, uint32_t f2)
1230 float64 v1;
1231 float64 v2 = env->fregs[f2].d;
1232 v1 = float64_abs(v2);
1233 env->fregs[f1].d = v1;
1234 return set_cc_nz_f64(v1);
1237 /* absolute value of 128-bit float */
1238 uint32_t HELPER(lpxbr)(uint32_t f1, uint32_t f2)
1240 CPU_QuadU v1;
1241 CPU_QuadU v2;
1242 v2.ll.upper = env->fregs[f2].ll;
1243 v2.ll.lower = env->fregs[f2 + 2].ll;
1244 v1.q = float128_abs(v2.q);
1245 env->fregs[f1].ll = v1.ll.upper;
1246 env->fregs[f1 + 2].ll = v1.ll.lower;
1247 return set_cc_nz_f128(v1.q);
1250 /* load and test 64-bit float */
1251 uint32_t HELPER(ltdbr)(uint32_t f1, uint32_t f2)
1253 env->fregs[f1].d = env->fregs[f2].d;
1254 return set_cc_nz_f64(env->fregs[f1].d);
1257 /* load and test 32-bit float */
1258 uint32_t HELPER(ltebr)(uint32_t f1, uint32_t f2)
1260 env->fregs[f1].l.upper = env->fregs[f2].l.upper;
1261 return set_cc_nz_f32(env->fregs[f1].l.upper);
1264 /* load and test 128-bit float */
1265 uint32_t HELPER(ltxbr)(uint32_t f1, uint32_t f2)
1267 CPU_QuadU x;
1268 x.ll.upper = env->fregs[f2].ll;
1269 x.ll.lower = env->fregs[f2 + 2].ll;
1270 env->fregs[f1].ll = x.ll.upper;
1271 env->fregs[f1 + 2].ll = x.ll.lower;
1272 return set_cc_nz_f128(x.q);
1275 /* load complement of 32-bit float */
1276 uint32_t HELPER(lcebr)(uint32_t f1, uint32_t f2)
1278 env->fregs[f1].l.upper = float32_chs(env->fregs[f2].l.upper);
1280 return set_cc_nz_f32(env->fregs[f1].l.upper);
1283 /* load complement of 64-bit float */
1284 uint32_t HELPER(lcdbr)(uint32_t f1, uint32_t f2)
1286 env->fregs[f1].d = float64_chs(env->fregs[f2].d);
1288 return set_cc_nz_f64(env->fregs[f1].d);
1291 /* load complement of 128-bit float */
1292 uint32_t HELPER(lcxbr)(uint32_t f1, uint32_t f2)
1294 CPU_QuadU x1, x2;
1295 x2.ll.upper = env->fregs[f2].ll;
1296 x2.ll.lower = env->fregs[f2 + 2].ll;
1297 x1.q = float128_chs(x2.q);
1298 env->fregs[f1].ll = x1.ll.upper;
1299 env->fregs[f1 + 2].ll = x1.ll.lower;
1300 return set_cc_nz_f128(x1.q);
1303 /* 32-bit FP addition RM */
1304 void HELPER(aeb)(uint32_t f1, uint32_t val)
1306 float32 v1 = env->fregs[f1].l.upper;
1307 CPU_FloatU v2;
1308 v2.l = val;
1309 HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__,
1310 v1, f1, v2.f);
1311 env->fregs[f1].l.upper = float32_add(v1, v2.f, &env->fpu_status);
1314 /* 32-bit FP division RM */
1315 void HELPER(deb)(uint32_t f1, uint32_t val)
1317 float32 v1 = env->fregs[f1].l.upper;
1318 CPU_FloatU v2;
1319 v2.l = val;
1320 HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__,
1321 v1, f1, v2.f);
1322 env->fregs[f1].l.upper = float32_div(v1, v2.f, &env->fpu_status);
1325 /* 32-bit FP multiplication RM */
1326 void HELPER(meeb)(uint32_t f1, uint32_t val)
1328 float32 v1 = env->fregs[f1].l.upper;
1329 CPU_FloatU v2;
1330 v2.l = val;
1331 HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__,
1332 v1, f1, v2.f);
1333 env->fregs[f1].l.upper = float32_mul(v1, v2.f, &env->fpu_status);
1336 /* 32-bit FP compare RR */
1337 uint32_t HELPER(cebr)(uint32_t f1, uint32_t f2)
1339 float32 v1 = env->fregs[f1].l.upper;
1340 float32 v2 = env->fregs[f2].l.upper;
1341 HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__,
1342 v1, f1, v2);
1343 return set_cc_f32(v1, v2);
1346 /* 64-bit FP compare RR */
1347 uint32_t HELPER(cdbr)(uint32_t f1, uint32_t f2)
1349 float64 v1 = env->fregs[f1].d;
1350 float64 v2 = env->fregs[f2].d;
1351 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__,
1352 v1, f1, v2);
1353 return set_cc_f64(v1, v2);
1356 /* 128-bit FP compare RR */
1357 uint32_t HELPER(cxbr)(uint32_t f1, uint32_t f2)
1359 CPU_QuadU v1;
1360 v1.ll.upper = env->fregs[f1].ll;
1361 v1.ll.lower = env->fregs[f1 + 2].ll;
1362 CPU_QuadU v2;
1363 v2.ll.upper = env->fregs[f2].ll;
1364 v2.ll.lower = env->fregs[f2 + 2].ll;
1366 return float_comp_to_cc(float128_compare_quiet(v1.q, v2.q,
1367 &env->fpu_status));
1370 /* 64-bit FP compare RM */
1371 uint32_t HELPER(cdb)(uint32_t f1, uint64_t a2)
1373 float64 v1 = env->fregs[f1].d;
1374 CPU_DoubleU v2;
1375 v2.ll = ldq(a2);
1376 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__, v1,
1377 f1, v2.d);
1378 return set_cc_f64(v1, v2.d);
1381 /* 64-bit FP addition RM */
1382 uint32_t HELPER(adb)(uint32_t f1, uint64_t a2)
1384 float64 v1 = env->fregs[f1].d;
1385 CPU_DoubleU v2;
1386 v2.ll = ldq(a2);
1387 HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__,
1388 v1, f1, v2.d);
1389 env->fregs[f1].d = v1 = float64_add(v1, v2.d, &env->fpu_status);
1390 return set_cc_nz_f64(v1);
1393 /* 32-bit FP subtraction RM */
1394 void HELPER(seb)(uint32_t f1, uint32_t val)
1396 float32 v1 = env->fregs[f1].l.upper;
1397 CPU_FloatU v2;
1398 v2.l = val;
1399 env->fregs[f1].l.upper = float32_sub(v1, v2.f, &env->fpu_status);
1402 /* 64-bit FP subtraction RM */
1403 uint32_t HELPER(sdb)(uint32_t f1, uint64_t a2)
1405 float64 v1 = env->fregs[f1].d;
1406 CPU_DoubleU v2;
1407 v2.ll = ldq(a2);
1408 env->fregs[f1].d = v1 = float64_sub(v1, v2.d, &env->fpu_status);
1409 return set_cc_nz_f64(v1);
1412 /* 64-bit FP multiplication RM */
1413 void HELPER(mdb)(uint32_t f1, uint64_t a2)
1415 float64 v1 = env->fregs[f1].d;
1416 CPU_DoubleU v2;
1417 v2.ll = ldq(a2);
1418 HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__,
1419 v1, f1, v2.d);
1420 env->fregs[f1].d = float64_mul(v1, v2.d, &env->fpu_status);
1423 /* 64-bit FP division RM */
1424 void HELPER(ddb)(uint32_t f1, uint64_t a2)
1426 float64 v1 = env->fregs[f1].d;
1427 CPU_DoubleU v2;
1428 v2.ll = ldq(a2);
1429 HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__,
1430 v1, f1, v2.d);
1431 env->fregs[f1].d = float64_div(v1, v2.d, &env->fpu_status);
1434 static void set_round_mode(int m3)
1436 switch (m3) {
1437 case 0:
1438 /* current mode */
1439 break;
1440 case 1:
1441 /* biased round to nearest */
1442 case 4:
1443 /* round to nearest */
1444 set_float_rounding_mode(float_round_nearest_even, &env->fpu_status);
1445 break;
1446 case 5:
1447 /* round to zero */
1448 set_float_rounding_mode(float_round_to_zero, &env->fpu_status);
1449 break;
1450 case 6:
1451 /* round to +inf */
1452 set_float_rounding_mode(float_round_up, &env->fpu_status);
1453 break;
1454 case 7:
1455 /* round to -inf */
1456 set_float_rounding_mode(float_round_down, &env->fpu_status);
1457 break;
1461 /* convert 32-bit float to 64-bit int */
1462 uint32_t HELPER(cgebr)(uint32_t r1, uint32_t f2, uint32_t m3)
1464 float32 v2 = env->fregs[f2].l.upper;
1465 set_round_mode(m3);
1466 env->regs[r1] = float32_to_int64(v2, &env->fpu_status);
1467 return set_cc_nz_f32(v2);
1470 /* convert 64-bit float to 64-bit int */
1471 uint32_t HELPER(cgdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1473 float64 v2 = env->fregs[f2].d;
1474 set_round_mode(m3);
1475 env->regs[r1] = float64_to_int64(v2, &env->fpu_status);
1476 return set_cc_nz_f64(v2);
1479 /* convert 128-bit float to 64-bit int */
1480 uint32_t HELPER(cgxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1482 CPU_QuadU v2;
1483 v2.ll.upper = env->fregs[f2].ll;
1484 v2.ll.lower = env->fregs[f2 + 2].ll;
1485 set_round_mode(m3);
1486 env->regs[r1] = float128_to_int64(v2.q, &env->fpu_status);
1487 if (float128_is_any_nan(v2.q)) {
1488 return 3;
1489 } else if (float128_is_zero(v2.q)) {
1490 return 0;
1491 } else if (float128_is_neg(v2.q)) {
1492 return 1;
1493 } else {
1494 return 2;
1498 /* convert 32-bit float to 32-bit int */
1499 uint32_t HELPER(cfebr)(uint32_t r1, uint32_t f2, uint32_t m3)
1501 float32 v2 = env->fregs[f2].l.upper;
1502 set_round_mode(m3);
1503 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1504 float32_to_int32(v2, &env->fpu_status);
1505 return set_cc_nz_f32(v2);
1508 /* convert 64-bit float to 32-bit int */
1509 uint32_t HELPER(cfdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1511 float64 v2 = env->fregs[f2].d;
1512 set_round_mode(m3);
1513 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1514 float64_to_int32(v2, &env->fpu_status);
1515 return set_cc_nz_f64(v2);
1518 /* convert 128-bit float to 32-bit int */
1519 uint32_t HELPER(cfxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1521 CPU_QuadU v2;
1522 v2.ll.upper = env->fregs[f2].ll;
1523 v2.ll.lower = env->fregs[f2 + 2].ll;
1524 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1525 float128_to_int32(v2.q, &env->fpu_status);
1526 return set_cc_nz_f128(v2.q);
1529 /* load 32-bit FP zero */
1530 void HELPER(lzer)(uint32_t f1)
1532 env->fregs[f1].l.upper = float32_zero;
1535 /* load 64-bit FP zero */
1536 void HELPER(lzdr)(uint32_t f1)
1538 env->fregs[f1].d = float64_zero;
1541 /* load 128-bit FP zero */
1542 void HELPER(lzxr)(uint32_t f1)
1544 CPU_QuadU x;
1545 x.q = float64_to_float128(float64_zero, &env->fpu_status);
1546 env->fregs[f1].ll = x.ll.upper;
1547 env->fregs[f1 + 2].ll = x.ll.lower; /* 128-bit values use the f1 / f1 + 2 register pair */
1550 /* 128-bit FP subtraction RR */
1551 uint32_t HELPER(sxbr)(uint32_t f1, uint32_t f2)
1553 CPU_QuadU v1;
1554 v1.ll.upper = env->fregs[f1].ll;
1555 v1.ll.lower = env->fregs[f1 + 2].ll;
1556 CPU_QuadU v2;
1557 v2.ll.upper = env->fregs[f2].ll;
1558 v2.ll.lower = env->fregs[f2 + 2].ll;
1559 CPU_QuadU res;
1560 res.q = float128_sub(v1.q, v2.q, &env->fpu_status);
1561 env->fregs[f1].ll = res.ll.upper;
1562 env->fregs[f1 + 2].ll = res.ll.lower;
1563 return set_cc_nz_f128(res.q);
1566 /* 128-bit FP addition RR */
1567 uint32_t HELPER(axbr)(uint32_t f1, uint32_t f2)
1569 CPU_QuadU v1;
1570 v1.ll.upper = env->fregs[f1].ll;
1571 v1.ll.lower = env->fregs[f1 + 2].ll;
1572 CPU_QuadU v2;
1573 v2.ll.upper = env->fregs[f2].ll;
1574 v2.ll.lower = env->fregs[f2 + 2].ll;
1575 CPU_QuadU res;
1576 res.q = float128_add(v1.q, v2.q, &env->fpu_status);
1577 env->fregs[f1].ll = res.ll.upper;
1578 env->fregs[f1 + 2].ll = res.ll.lower;
1579 return set_cc_nz_f128(res.q);
1582 /* 32-bit FP multiplication RR */
1583 void HELPER(meebr)(uint32_t f1, uint32_t f2)
1585 env->fregs[f1].l.upper = float32_mul(env->fregs[f1].l.upper,
1586 env->fregs[f2].l.upper,
1587 &env->fpu_status);
1590 /* 64-bit FP division RR */
1591 void HELPER(ddbr)(uint32_t f1, uint32_t f2)
1593 env->fregs[f1].d = float64_div(env->fregs[f1].d, env->fregs[f2].d,
1594 &env->fpu_status);
1597 /* 64-bit FP multiply and add RM */
1598 void HELPER(madb)(uint32_t f1, uint64_t a2, uint32_t f3)
1600 HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__, f1, a2, f3);
1601 CPU_DoubleU v2;
1602 v2.ll = ldq(a2);
1603 env->fregs[f1].d = float64_add(env->fregs[f1].d,
1604 float64_mul(v2.d, env->fregs[f3].d,
1605 &env->fpu_status),
1606 &env->fpu_status);
1609 /* 64-bit FP multiply and add RR */
1610 void HELPER(madbr)(uint32_t f1, uint32_t f3, uint32_t f2)
1612 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
1613 env->fregs[f1].d = float64_add(float64_mul(env->fregs[f2].d,
1614 env->fregs[f3].d,
1615 &env->fpu_status),
1616 env->fregs[f1].d, &env->fpu_status);
1619 /* 64-bit FP multiply and subtract RR */
1620 void HELPER(msdbr)(uint32_t f1, uint32_t f3, uint32_t f2)
1622 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
1623 env->fregs[f1].d = float64_sub(float64_mul(env->fregs[f2].d,
1624 env->fregs[f3].d,
1625 &env->fpu_status),
1626 env->fregs[f1].d, &env->fpu_status);
1629 /* 32-bit FP multiply and add RR */
1630 void HELPER(maebr)(uint32_t f1, uint32_t f3, uint32_t f2)
1632 env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
1633 float32_mul(env->fregs[f2].l.upper,
1634 env->fregs[f3].l.upper,
1635 &env->fpu_status),
1636 &env->fpu_status);
1639 /* convert 32-bit float to 64-bit float */
1640 void HELPER(ldeb)(uint32_t f1, uint64_t a2)
1642 uint32_t v2;
1643 v2 = ldl(a2);
1644 env->fregs[f1].d = float32_to_float64(v2,
1645 &env->fpu_status);
1648 /* convert 64-bit float to 128-bit float */
1649 void HELPER(lxdb)(uint32_t f1, uint64_t a2)
1651 CPU_DoubleU v2;
1652 v2.ll = ldq(a2);
1653 CPU_QuadU v1;
1654 v1.q = float64_to_float128(v2.d, &env->fpu_status);
1655 env->fregs[f1].ll = v1.ll.upper;
1656 env->fregs[f1 + 2].ll = v1.ll.lower;
1659 /* test data class 32-bit */
1660 uint32_t HELPER(tceb)(uint32_t f1, uint64_t m2)
1662 float32 v1 = env->fregs[f1].l.upper;
1663 int neg = float32_is_neg(v1);
1664 uint32_t cc = 0;
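/* the m2 mask has one bit per class/sign pair: bits 11/10 zero, 9/8 normal,
   7/6 subnormal, 5/4 infinity, 3/2 quiet NaN, 1/0 signaling NaN
   (plus/minus); cc is 1 if the operand's class is selected, 0 otherwise */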
1666 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, (long)v1, m2, neg);
1667 if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
1668 (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
1669 (float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
1670 (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
1671 cc = 1;
1672 } else if (m2 & (1 << (9-neg))) {
1673 /* assume normalized number */
1674 cc = 1;
1677 /* FIXME: denormalized? */
1678 return cc;
1681 /* test data class 64-bit */
1682 uint32_t HELPER(tcdb)(uint32_t f1, uint64_t m2)
1684 float64 v1 = env->fregs[f1].d;
1685 int neg = float64_is_neg(v1);
1686 uint32_t cc = 0;
1688 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, v1, m2, neg);
1689 if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
1690 (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
1691 (float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
1692 (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
1693 cc = 1;
1694 } else if (m2 & (1 << (9-neg))) {
1695 /* assume normalized number */
1696 cc = 1;
1698 /* FIXME: denormalized? */
1699 return cc;
1702 /* test data class 128-bit */
1703 uint32_t HELPER(tcxb)(uint32_t f1, uint64_t m2)
1705 CPU_QuadU v1;
1706 uint32_t cc = 0;
1707 v1.ll.upper = env->fregs[f1].ll;
1708 v1.ll.lower = env->fregs[f1 + 2].ll;
1710 int neg = float128_is_neg(v1.q);
1711 if ((float128_is_zero(v1.q) && (m2 & (1 << (11-neg)))) ||
1712 (float128_is_infinity(v1.q) && (m2 & (1 << (5-neg)))) ||
1713 (float128_is_any_nan(v1.q) && (m2 & (1 << (3-neg)))) ||
1714 (float128_is_signaling_nan(v1.q) && (m2 & (1 << (1-neg))))) {
1715 cc = 1;
1716 } else if (m2 & (1 << (9-neg))) {
1717 /* assume normalized number */
1718 cc = 1;
1720 /* FIXME: denormalized? */
1721 return cc;
1724 /* find leftmost one */
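/* FLOGR: r1 receives the bit position of the leftmost one bit (64 if v2 is
   zero) and r1 + 1 receives v2 with that bit cleared; cc is 0 for a zero
   operand and 2 otherwise. e.g. v2 = 0x0000800000000000 gives r1 = 16,
   r1 + 1 = 0 and cc = 2. */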
1725 uint32_t HELPER(flogr)(uint32_t r1, uint64_t v2)
1727 uint64_t res = 0;
1728 uint64_t ov2 = v2;
1730 while (!(v2 & 0x8000000000000000ULL) && v2) {
1731 v2 <<= 1;
1732 res++;
1735 if (!v2) {
1736 env->regs[r1] = 64;
1737 env->regs[r1 + 1] = 0;
1738 return 0;
1739 } else {
1740 env->regs[r1] = res;
1741 env->regs[r1 + 1] = ov2 & ~(0x8000000000000000ULL >> res);
1742 return 2;
1746 /* square root 64-bit RR */
1747 void HELPER(sqdbr)(uint32_t f1, uint32_t f2)
1749 env->fregs[f1].d = float64_sqrt(env->fregs[f2].d, &env->fpu_status);
1752 /* checksum */
1753 void HELPER(cksm)(uint32_t r1, uint32_t r2)
1755 uint64_t src = get_address_31fix(r2);
1756 uint64_t src_len = env->regs[(r2 + 1) & 15];
1757 uint64_t cksm = (uint32_t)env->regs[r1];
1759 while (src_len >= 4) {
1760 cksm += ldl(src);
1762 /* move to next word */
1763 src_len -= 4;
1764 src += 4;
1767 switch (src_len) {
1768 case 0:
1769 break;
1770 case 1:
1771 cksm += ldub(src) << 24;
1772 break;
1773 case 2:
1774 cksm += lduw(src) << 16;
1775 break;
1776 case 3:
1777 cksm += lduw(src) << 16;
1778 cksm += ldub(src + 2) << 8;
1779 break;
1782 /* indicate we've processed everything */
1783 env->regs[r2] = src + src_len;
1784 env->regs[(r2 + 1) & 15] = 0;
1786 /* store result: fold the carries above bit 31 back into the 32-bit sum */
1787 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1788 ((uint32_t)cksm + (cksm >> 32));
1791 static inline uint32_t cc_calc_ltgt_32(CPUS390XState *env, int32_t src,
1792 int32_t dst)
1794 if (src == dst) {
1795 return 0;
1796 } else if (src < dst) {
1797 return 1;
1798 } else {
1799 return 2;
1803 static inline uint32_t cc_calc_ltgt0_32(CPUS390XState *env, int32_t dst)
1805 return cc_calc_ltgt_32(env, dst, 0);
1808 static inline uint32_t cc_calc_ltgt_64(CPUS390XState *env, int64_t src,
1809 int64_t dst)
1811 if (src == dst) {
1812 return 0;
1813 } else if (src < dst) {
1814 return 1;
1815 } else {
1816 return 2;
1820 static inline uint32_t cc_calc_ltgt0_64(CPUS390XState *env, int64_t dst)
1822 return cc_calc_ltgt_64(env, dst, 0);
1825 static inline uint32_t cc_calc_ltugtu_32(CPUS390XState *env, uint32_t src,
1826 uint32_t dst)
1828 if (src == dst) {
1829 return 0;
1830 } else if (src < dst) {
1831 return 1;
1832 } else {
1833 return 2;
1837 static inline uint32_t cc_calc_ltugtu_64(CPUS390XState *env, uint64_t src,
1838 uint64_t dst)
1840 if (src == dst) {
1841 return 0;
1842 } else if (src < dst) {
1843 return 1;
1844 } else {
1845 return 2;
1849 static inline uint32_t cc_calc_tm_32(CPUS390XState *env, uint32_t val, uint32_t mask)
1851 HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__, val, mask);
1852 uint16_t r = val & mask;
1853 if (r == 0 || mask == 0) {
1854 return 0;
1855 } else if (r == mask) {
1856 return 3;
1857 } else {
1858 return 1;
1862 /* set condition code for test under mask */
1863 static inline uint32_t cc_calc_tm_64(CPUS390XState *env, uint64_t val, uint32_t mask)
1865 uint16_t r = val & mask;
1866 HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__, val, mask, r);
1867 if (r == 0 || mask == 0) {
1868 return 0;
1869 } else if (r == mask) {
1870 return 3;
1871 } else {
1872 while (!(mask & 0x8000)) {
1873 mask <<= 1;
1874 val <<= 1;
1876 if (val & 0x8000) {
1877 return 2;
1878 } else {
1879 return 1;
1884 static inline uint32_t cc_calc_nz(CPUS390XState *env, uint64_t dst)
1886 return !!dst;
1889 static inline uint32_t cc_calc_add_64(CPUS390XState *env, int64_t a1, int64_t a2,
1890 int64_t ar)
1892 if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
1893 return 3; /* overflow */
1894 } else {
1895 if (ar < 0) {
1896 return 1;
1897 } else if (ar > 0) {
1898 return 2;
1899 } else {
1900 return 0;
1905 static inline uint32_t cc_calc_addu_64(CPUS390XState *env, uint64_t a1, uint64_t a2,
1906 uint64_t ar)
1908 if (ar == 0) {
1909 if (a1) {
1910 return 2;
1911 } else {
1912 return 0;
1914 } else {
1915 if (ar < a1 || ar < a2) {
1916 return 3;
1917 } else {
1918 return 1;
1923 static inline uint32_t cc_calc_sub_64(CPUS390XState *env, int64_t a1, int64_t a2,
1924 int64_t ar)
1926 if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
1927 return 3; /* overflow */
1928 } else {
1929 if (ar < 0) {
1930 return 1;
1931 } else if (ar > 0) {
1932 return 2;
1933 } else {
1934 return 0;
1939 static inline uint32_t cc_calc_subu_64(CPUS390XState *env, uint64_t a1, uint64_t a2,
1940 uint64_t ar)
1942 if (ar == 0) {
1943 return 2;
1944 } else {
1945 if (a2 > a1) {
1946 return 1;
1947 } else {
1948 return 3;
1953 static inline uint32_t cc_calc_abs_64(CPUS390XState *env, int64_t dst)
1955 if ((uint64_t)dst == 0x8000000000000000ULL) {
1956 return 3;
1957 } else if (dst) {
1958 return 1;
1959 } else {
1960 return 0;
1964 static inline uint32_t cc_calc_nabs_64(CPUS390XState *env, int64_t dst)
1966 return !!dst;
1969 static inline uint32_t cc_calc_comp_64(CPUS390XState *env, int64_t dst)
1971 if ((uint64_t)dst == 0x8000000000000000ULL) {
1972 return 3;
1973 } else if (dst < 0) {
1974 return 1;
1975 } else if (dst > 0) {
1976 return 2;
1977 } else {
1978 return 0;
1983 static inline uint32_t cc_calc_add_32(CPUS390XState *env, int32_t a1, int32_t a2,
1984 int32_t ar)
1986 if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
1987 return 3; /* overflow */
1988 } else {
1989 if (ar < 0) {
1990 return 1;
1991 } else if (ar > 0) {
1992 return 2;
1993 } else {
1994 return 0;
1999 static inline uint32_t cc_calc_addu_32(CPUS390XState *env, uint32_t a1, uint32_t a2,
2000 uint32_t ar)
2002 if (ar == 0) {
2003 if (a1) {
2004 return 2;
2005 } else {
2006 return 0;
2008 } else {
2009 if (ar < a1 || ar < a2) {
2010 return 3;
2011 } else {
2012 return 1;
2017 static inline uint32_t cc_calc_sub_32(CPUS390XState *env, int32_t a1, int32_t a2,
2018 int32_t ar)
2020 if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
2021 return 3; /* overflow */
2022 } else {
2023 if (ar < 0) {
2024 return 1;
2025 } else if (ar > 0) {
2026 return 2;
2027 } else {
2028 return 0;
2033 static inline uint32_t cc_calc_subu_32(CPUS390XState *env, uint32_t a1, uint32_t a2,
2034 uint32_t ar)
2036 if (ar == 0) {
2037 return 2;
2038 } else {
2039 if (a2 > a1) {
2040 return 1;
2041 } else {
2042 return 3;
2047 static inline uint32_t cc_calc_abs_32(CPUS390XState *env, int32_t dst)
2049 if ((uint32_t)dst == 0x80000000UL) {
2050 return 3;
2051 } else if (dst) {
2052 return 1;
2053 } else {
2054 return 0;
2058 static inline uint32_t cc_calc_nabs_32(CPUS390XState *env, int32_t dst)
2060 return !!dst;
2063 static inline uint32_t cc_calc_comp_32(CPUS390XState *env, int32_t dst)
2065 if ((uint32_t)dst == 0x80000000UL) {
2066 return 3;
2067 } else if (dst < 0) {
2068 return 1;
2069 } else if (dst > 0) {
2070 return 2;
2071 } else {
2072 return 0;
2076 /* calculate condition code for insert character under mask insn */
2077 static inline uint32_t cc_calc_icm_32(CPUS390XState *env, uint32_t mask, uint32_t val)
2079 HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__, mask, val);
2080 uint32_t cc;
2082 if (mask == 0xf) {
2083 if (!val) {
2084 return 0;
2085 } else if (val & 0x80000000) {
2086 return 1;
2087 } else {
2088 return 2;
2092 if (!val || !mask) {
2093 cc = 0;
2094 } else {
2095 while (mask != 1) {
2096 mask >>= 1;
2097 val >>= 8;
2099 if (val & 0x80) {
2100 cc = 1;
2101 } else {
2102 cc = 2;
2105 return cc;
2108 static inline uint32_t cc_calc_slag(CPUS390XState *env, uint64_t src, uint64_t shift)
2110 uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
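/* the 'shift' most significant bits are the ones shifted out; for the
   result not to overflow they must all equal the sign bit */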
2111 uint64_t match, r;
2113 /* check if the sign bit stays the same */
2114 if (src & (1ULL << 63)) {
2115 match = mask;
2116 } else {
2117 match = 0;
2120 if ((src & mask) != match) {
2121 /* overflow */
2122 return 3;
2125 r = ((src << shift) & ((1ULL << 63) - 1)) | (src & (1ULL << 63));
2127 if ((int64_t)r == 0) {
2128 return 0;
2129 } else if ((int64_t)r < 0) {
2130 return 1;
2133 return 2;
2137 static inline uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src,
2138 uint64_t dst, uint64_t vr)
2140 uint32_t r = 0;
2142 switch (cc_op) {
2143 case CC_OP_CONST0:
2144 case CC_OP_CONST1:
2145 case CC_OP_CONST2:
2146 case CC_OP_CONST3:
2147 /* cc_op value _is_ cc */
2148 r = cc_op;
2149 break;
2150 case CC_OP_LTGT0_32:
2151 r = cc_calc_ltgt0_32(env, dst);
2152 break;
2153 case CC_OP_LTGT0_64:
2154 r = cc_calc_ltgt0_64(env, dst);
2155 break;
2156 case CC_OP_LTGT_32:
2157 r = cc_calc_ltgt_32(env, src, dst);
2158 break;
2159 case CC_OP_LTGT_64:
2160 r = cc_calc_ltgt_64(env, src, dst);
2161 break;
2162 case CC_OP_LTUGTU_32:
2163 r = cc_calc_ltugtu_32(env, src, dst);
2164 break;
2165 case CC_OP_LTUGTU_64:
2166 r = cc_calc_ltugtu_64(env, src, dst);
2167 break;
2168 case CC_OP_TM_32:
2169 r = cc_calc_tm_32(env, src, dst);
2170 break;
2171 case CC_OP_TM_64:
2172 r = cc_calc_tm_64(env, src, dst);
2173 break;
2174 case CC_OP_NZ:
2175 r = cc_calc_nz(env, dst);
2176 break;
2177 case CC_OP_ADD_64:
2178 r = cc_calc_add_64(env, src, dst, vr);
2179 break;
2180 case CC_OP_ADDU_64:
2181 r = cc_calc_addu_64(env, src, dst, vr);
2182 break;
2183 case CC_OP_SUB_64:
2184 r = cc_calc_sub_64(env, src, dst, vr);
2185 break;
2186 case CC_OP_SUBU_64:
2187 r = cc_calc_subu_64(env, src, dst, vr);
2188 break;
2189 case CC_OP_ABS_64:
2190 r = cc_calc_abs_64(env, dst);
2191 break;
2192 case CC_OP_NABS_64:
2193 r = cc_calc_nabs_64(env, dst);
2194 break;
2195 case CC_OP_COMP_64:
2196 r = cc_calc_comp_64(env, dst);
2197 break;
2199 case CC_OP_ADD_32:
2200 r = cc_calc_add_32(env, src, dst, vr);
2201 break;
2202 case CC_OP_ADDU_32:
2203 r = cc_calc_addu_32(env, src, dst, vr);
2204 break;
2205 case CC_OP_SUB_32:
2206 r = cc_calc_sub_32(env, src, dst, vr);
2207 break;
2208 case CC_OP_SUBU_32:
2209 r = cc_calc_subu_32(env, src, dst, vr);
2210 break;
2211 case CC_OP_ABS_32:
2212 r = cc_calc_abs_64(env, dst);
2213 break;
2214 case CC_OP_NABS_32:
2215 r = cc_calc_nabs_64(env, dst);
2216 break;
2217 case CC_OP_COMP_32:
2218 r = cc_calc_comp_32(env, dst);
2219 break;
2221 case CC_OP_ICM:
2222 r = cc_calc_icm_32(env, src, dst);
2223 break;
2224 case CC_OP_SLAG:
2225 r = cc_calc_slag(env, src, dst);
2226 break;
2228 case CC_OP_LTGT_F32:
2229 r = set_cc_f32(src, dst);
2230 break;
2231 case CC_OP_LTGT_F64:
2232 r = set_cc_f64(src, dst);
2233 break;
2234 case CC_OP_NZ_F32:
2235 r = set_cc_nz_f32(dst);
2236 break;
2237 case CC_OP_NZ_F64:
2238 r = set_cc_nz_f64(dst);
2239 break;
2241 default:
2242 cpu_abort(env, "Unknown CC operation: %s\n", cc_name(cc_op));
2245 HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__,
2246 cc_name(cc_op), src, dst, vr, r);
2247 return r;
2250 uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
2251 uint64_t vr)
2253 return do_calc_cc(env, cc_op, src, dst, vr);
2256 uint32_t HELPER(calc_cc)(uint32_t cc_op, uint64_t src, uint64_t dst,
2257 uint64_t vr)
2259 return do_calc_cc(env, cc_op, src, dst, vr);
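/* convert to decimal: pack the absolute value as BCD digits with the sign
   in the lowest nibble (0xc for plus, 0xd for minus),
   e.g. bin = -123 yields 0x123d */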
2262 uint64_t HELPER(cvd)(int32_t bin)
2264 /* positive 0 */
2265 uint64_t dec = 0x0c;
2266 int shift = 4;
2268 if (bin < 0) {
2269 bin = -bin;
2270 dec = 0x0d;
2273 for (shift = 4; (shift < 64) && bin; shift += 4) {
2274 int current_number = bin % 10;
2276 dec |= (current_number) << shift;
2277 bin /= 10;
2280 return dec;
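/* unpack: convert the packed decimal operand at src into zoned decimal at
   dest; the rightmost byte swaps its digit and sign nibbles, every other
   digit gets a 0xf zone nibble */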
2283 void HELPER(unpk)(uint32_t len, uint64_t dest, uint64_t src)
2285 int len_dest = len >> 4;
2286 int len_src = len & 0xf;
2287 uint8_t b;
2288 int second_nibble = 0;
2290 dest += len_dest;
2291 src += len_src;
2293 /* last byte is special, it only flips the nibbles */
2294 b = ldub(src);
2295 stb(dest, (b << 4) | (b >> 4));
2296 src--;
2297 len_src--;
2299 /* now pad every nibble with 0xf0 */
2301 while (len_dest > 0) {
2302 uint8_t cur_byte = 0;
2304 if (len_src > 0) {
2305 cur_byte = ldub(src);
2308 len_dest--;
2309 dest--;
2311 /* only advance one nibble at a time */
2312 if (second_nibble) {
2313 cur_byte >>= 4;
2314 len_src--;
2315 src--;
2317 second_nibble = !second_nibble;
2319 /* digit */
2320 cur_byte = (cur_byte & 0xf);
2321 /* zone bits */
2322 cur_byte |= 0xf0;
2324 stb(dest, cur_byte);
2328 void HELPER(tr)(uint32_t len, uint64_t array, uint64_t trans)
2330 int i;
2332 for (i = 0; i <= len; i++) {
2333 uint8_t byte = ldub(array + i);
2334 uint8_t new_byte = ldub(trans + byte);
2335 stb(array + i, new_byte);
2339 #ifndef CONFIG_USER_ONLY
2341 void HELPER(load_psw)(uint64_t mask, uint64_t addr)
2343 load_psw(env, mask, addr);
2344 cpu_loop_exit(env);
2347 static void program_interrupt(CPUS390XState *env, uint32_t code, int ilc)
2349 qemu_log("program interrupt at %#" PRIx64 "\n", env->psw.addr);
2351 if (kvm_enabled()) {
2352 #ifdef CONFIG_KVM
2353 kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code);
2354 #endif
2355 } else {
2356 env->int_pgm_code = code;
2357 env->int_pgm_ilc = ilc;
2358 env->exception_index = EXCP_PGM;
2359 cpu_loop_exit(env);
2363 static void ext_interrupt(CPUS390XState *env, int type, uint32_t param,
2364 uint64_t param64)
2366 cpu_inject_ext(env, type, param, param64);
2369 int sclp_service_call(CPUS390XState *env, uint32_t sccb, uint64_t code)
2371 int r = 0;
2372 int shift = 0;
2374 #ifdef DEBUG_HELPER
2375 printf("sclp(0x%x, 0x%" PRIx64 ")\n", sccb, code);
2376 #endif
2378 if (sccb & ~0x7ffffff8ul) {
2379 fprintf(stderr, "KVM: invalid sccb address 0x%x\n", sccb);
2380 r = -1;
2381 goto out;
2384 switch(code) {
2385 case SCLP_CMDW_READ_SCP_INFO:
2386 case SCLP_CMDW_READ_SCP_INFO_FORCED:
2387 while ((ram_size >> (20 + shift)) > 65535) {
2388 shift++;
2390 stw_phys(sccb + SCP_MEM_CODE, ram_size >> (20 + shift));
2391 stb_phys(sccb + SCP_INCREMENT, 1 << shift);
2392 stw_phys(sccb + SCP_RESPONSE_CODE, 0x10);
2394 if (kvm_enabled()) {
2395 #ifdef CONFIG_KVM
2396 kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE,
2397 sccb & ~3, 0, 1);
2398 #endif
2399 } else {
2400 env->psw.addr += 4;
2401 ext_interrupt(env, EXT_SERVICE, sccb & ~3, 0);
2403 break;
2404 default:
2405 #ifdef DEBUG_HELPER
2406 printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64 "x\n", sccb, code);
2407 #endif
2408 r = -1;
2409 break;
2412 out:
2413 return r;
2416 /* SCLP service call */
2417 uint32_t HELPER(servc)(uint32_t r1, uint64_t r2)
2419 if (sclp_service_call(env, r1, r2)) {
2420 return 3;
2423 return 0;
2426 /* DIAG */
2427 uint64_t HELPER(diag)(uint32_t num, uint64_t mem, uint64_t code)
2429 uint64_t r;
2431 switch (num) {
2432 case 0x500:
2433 /* KVM hypercall */
2434 r = s390_virtio_hypercall(env, mem, code);
2435 break;
2436 case 0x44:
2437 /* yield */
2438 r = 0;
2439 break;
2440 case 0x308:
2441 /* ipl */
2442 r = 0;
2443 break;
2444 default:
2445 r = -1;
2446 break;
2449 if (r) {
2450 program_interrupt(env, PGM_OPERATION, ILC_LATER_INC);
2453 return r;
2456 /* Store CPU ID */
2457 void HELPER(stidp)(uint64_t a1)
2459 stq(a1, env->cpu_num);
2462 /* Set Prefix */
2463 void HELPER(spx)(uint64_t a1)
2465 uint32_t prefix;
2467 prefix = ldl(a1);
2468 env->psa = prefix & 0xfffff000;
2469 qemu_log("prefix: %#x\n", prefix);
2470 tlb_flush_page(env, 0);
2471 tlb_flush_page(env, TARGET_PAGE_SIZE);
2474 /* Set Clock */
2475 uint32_t HELPER(sck)(uint64_t a1)
2477 /* XXX not implemented - is it necessary? */
2479 return 0;
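/* Guest TOD clock value: tod_offset appears to be the guest TOD at the
   point tod_basetime was sampled from vm_clock (in ns); time2tod() is
   assumed to convert nanoseconds into TOD ticks of 1/4096 microsecond. */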
2482 static inline uint64_t clock_value(CPUS390XState *env)
2484 uint64_t time;
2486 time = env->tod_offset +
2487 time2tod(qemu_get_clock_ns(vm_clock) - env->tod_basetime);
2489 return time;
2492 /* Store Clock */
2493 uint32_t HELPER(stck)(uint64_t a1)
2495 stq(a1, clock_value(env));
2497 return 0;
2500 /* Store Clock Extended */
2501 uint32_t HELPER(stcke)(uint64_t a1)
2503 stb(a1, 0);
2504 /* basically the same value as stck */
2505 stq(a1 + 1, clock_value(env) | env->cpu_num);
2506 /* more fine grained than stck */
2507 stq(a1 + 9, 0);
2508 /* XXX programmable fields */
2509 stw(a1 + 17, 0);
2512 return 0;
2515 /* Set Clock Comparator */
2516 void HELPER(sckc)(uint64_t a1)
2518 uint64_t time = ldq(a1);
2520 if (time == -1ULL) {
2521 return;
2524 /* difference between now and then */
2525 time -= clock_value(env);
2526 /* convert TOD ticks (1/4096 us each) to ns: ns = ticks * 125 / 512 */
2527 time = (time * 125) >> 9;
2529 qemu_mod_timer(env->tod_timer, qemu_get_clock_ns(vm_clock) + time);
2532 /* Store Clock Comparator */
2533 void HELPER(stckc)(uint64_t a1)
2535 /* XXX implement */
2536 stq(a1, 0);
2539 /* Set CPU Timer */
2540 void HELPER(spt)(uint64_t a1)
2542 uint64_t time = ldq(a1);
2544 if (time == -1ULL) {
2545 return;
2548 /* convert CPU-timer ticks (1/4096 us each) to ns: ns = ticks * 125 / 512 */
2549 time = (time * 125) >> 9;
2551 qemu_mod_timer(env->cpu_timer, qemu_get_clock_ns(vm_clock) + time);
2554 /* Store CPU Timer */
2555 void HELPER(stpt)(uint64_t a1)
2557 /* XXX implement */
2558 stq(a1, 0);
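/* STORE SYSTEM INFORMATION: the function code in r0 selects the level
   (basic machine, LPAR, VM or "current"), sel1/sel2 pick the SYSIB
   block, which is copied to the SYSIB at address a0; cc 3 means the
   requested information is not available. */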
2561 /* Store System Information */
2562 uint32_t HELPER(stsi)(uint64_t a0, uint32_t r0, uint32_t r1)
2564 int cc = 0;
2565 int sel1, sel2;
2567 if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
2568 ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
2569 /* valid function code, invalid reserved bits */
2570 program_interrupt(env, PGM_SPECIFICATION, 2);
2573 sel1 = r0 & STSI_R0_SEL1_MASK;
2574 sel2 = r1 & STSI_R1_SEL2_MASK;
2576 /* XXX: spec exception if sysib is not 4k-aligned */
2578 switch (r0 & STSI_LEVEL_MASK) {
2579 case STSI_LEVEL_1:
2580 if ((sel1 == 1) && (sel2 == 1)) {
2581 /* Basic Machine Configuration */
2582 struct sysib_111 sysib;
2584 memset(&sysib, 0, sizeof(sysib));
2585 ebcdic_put(sysib.manuf, "QEMU ", 16);
2586 /* same as machine type number in STORE CPU ID */
2587 ebcdic_put(sysib.type, "QEMU", 4);
2588 /* same as model number in STORE CPU ID */
2589 ebcdic_put(sysib.model, "QEMU ", 16);
2590 ebcdic_put(sysib.sequence, "QEMU ", 16);
2591 ebcdic_put(sysib.plant, "QEMU", 4);
2592 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2593 } else if ((sel1 == 2) && (sel2 == 1)) {
2594 /* Basic Machine CPU */
2595 struct sysib_121 sysib;
2597 memset(&sysib, 0, sizeof(sysib));
2598 /* XXX make different for different CPUs? */
2599 ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
2600 ebcdic_put(sysib.plant, "QEMU", 4);
2601 stw_p(&sysib.cpu_addr, env->cpu_num);
2602 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2603 } else if ((sel1 == 2) && (sel2 == 2)) {
2604 /* Basic Machine CPUs */
2605 struct sysib_122 sysib;
2607 memset(&sysib, 0, sizeof(sysib));
2608 stl_p(&sysib.capability, 0x443afc29);
2609 /* XXX change when SMP comes */
2610 stw_p(&sysib.total_cpus, 1);
2611 stw_p(&sysib.active_cpus, 1);
2612 stw_p(&sysib.standby_cpus, 0);
2613 stw_p(&sysib.reserved_cpus, 0);
2614 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2615 } else {
2616 cc = 3;
2618 break;
2619 case STSI_LEVEL_2:
2621 if ((sel1 == 2) && (sel2 == 1)) {
2622 /* LPAR CPU */
2623 struct sysib_221 sysib;
2625 memset(&sysib, 0, sizeof(sysib));
2626 /* XXX make different for different CPUs? */
2627 ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
2628 ebcdic_put(sysib.plant, "QEMU", 4);
2629 stw_p(&sysib.cpu_addr, env->cpu_num);
2630 stw_p(&sysib.cpu_id, 0);
2631 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2632 } else if ((sel1 == 2) && (sel2 == 2)) {
2633 /* LPAR CPUs */
2634 struct sysib_222 sysib;
2636 memset(&sysib, 0, sizeof(sysib));
2637 stw_p(&sysib.lpar_num, 0);
2638 sysib.lcpuc = 0;
2639 /* XXX change when SMP comes */
2640 stw_p(&sysib.total_cpus, 1);
2641 stw_p(&sysib.conf_cpus, 1);
2642 stw_p(&sysib.standby_cpus, 0);
2643 stw_p(&sysib.reserved_cpus, 0);
2644 ebcdic_put(sysib.name, "QEMU ", 8);
2645 stl_p(&sysib.caf, 1000);
2646 stw_p(&sysib.dedicated_cpus, 0);
2647 stw_p(&sysib.shared_cpus, 0);
2648 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2649 } else {
2650 cc = 3;
2652 break;
2654 case STSI_LEVEL_3:
2656 if ((sel1 == 2) && (sel2 == 2)) {
2657 /* VM CPUs */
2658 struct sysib_322 sysib;
2660 memset(&sysib, 0, sizeof(sysib));
2661 sysib.count = 1;
2662 /* XXX change when SMP comes */
2663 stw_p(&sysib.vm[0].total_cpus, 1);
2664 stw_p(&sysib.vm[0].conf_cpus, 1);
2665 stw_p(&sysib.vm[0].standby_cpus, 0);
2666 stw_p(&sysib.vm[0].reserved_cpus, 0);
2667 ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
2668 stl_p(&sysib.vm[0].caf, 1000);
2669 ebcdic_put(sysib.vm[0].cpi, "KVM/Linux ", 16);
2670 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2671 } else {
2672 cc = 3;
2674 break;
2676 case STSI_LEVEL_CURRENT:
2677 env->regs[0] = STSI_LEVEL_3;
2678 break;
2679 default:
2680 cc = 3;
2681 break;
2684 return cc;
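/* LOAD CONTROL: load control registers r1 through r3 (wrapping modulo
   16) from consecutive doublewords at a2.  Control registers can change
   address translation, so the TLB is flushed afterwards. */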
2687 void HELPER(lctlg)(uint32_t r1, uint64_t a2, uint32_t r3)
2689 int i;
2690 uint64_t src = a2;
2692 for (i = r1;; i = (i + 1) % 16) {
2693 env->cregs[i] = ldq(src);
2694 HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
2695 i, src, env->cregs[i]);
2696 src += sizeof(uint64_t);
2698 if (i == r3) {
2699 break;
2703 tlb_flush(env, 1);
2706 void HELPER(lctl)(uint32_t r1, uint64_t a2, uint32_t r3)
2708 int i;
2709 uint64_t src = a2;
2711 for (i = r1;; i = (i + 1) % 16) {
2712 env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | ldl(src);
2713 src += sizeof(uint32_t);
2715 if (i == r3) {
2716 break;
2720 tlb_flush(env, 1);
2723 void HELPER(stctg)(uint32_t r1, uint64_t a2, uint32_t r3)
2725 int i;
2726 uint64_t dest = a2;
2728 for (i = r1;; i = (i + 1) % 16) {
2729 stq(dest, env->cregs[i]);
2730 dest += sizeof(uint64_t);
2732 if (i == r3) {
2733 break;
2738 void HELPER(stctl)(uint32_t r1, uint64_t a2, uint32_t r3)
2740 int i;
2741 uint64_t dest = a2;
2743 for (i = r1;; i = (i + 1) % 16) {
2744 stl(dest, env->cregs[i]);
2745 dest += sizeof(uint32_t);
2747 if (i == r3) {
2748 break;
2753 uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
2755 /* XXX implement */
2757 return 0;
2760 /* insert storage key extended */
2761 uint64_t HELPER(iske)(uint64_t r2)
2763 uint64_t addr = get_address(0, 0, r2);
2765 if (addr >= ram_size) {
2766 return 0;
2769 return env->storage_keys[addr / TARGET_PAGE_SIZE];
2772 /* set storage key extended */
2773 void HELPER(sske)(uint32_t r1, uint64_t r2)
2775 uint64_t addr = get_address(0, 0, r2);
2777 if (addr >= ram_size) {
2778 return;
2781 env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
2784 /* reset reference bit extended */
2785 uint32_t HELPER(rrbe)(uint32_t r1, uint64_t r2)
2787 uint8_t re;
2788 uint8_t key;
2789 if (r2 >= ram_size) {
2790 return 0;
2793 key = env->storage_keys[r2 / TARGET_PAGE_SIZE];
2794 re = key & (SK_R | SK_C);
2795 env->storage_keys[r2 / TARGET_PAGE_SIZE] = (key & ~SK_R);
2798 * cc
2800 * 0 Reference bit zero; change bit zero
2801 * 1 Reference bit zero; change bit one
2802 * 2 Reference bit one; change bit zero
2803 * 3 Reference bit one; change bit one
2806 return re >> 1;
2809 /* compare and swap and purge */
2810 uint32_t HELPER(csp)(uint32_t r1, uint32_t r2)
2812 uint32_t cc;
2813 uint32_t o1 = env->regs[r1];
2814 uint64_t a2 = get_address_31fix(r2) & ~3ULL;
2815 uint32_t o2 = ldl(a2);
2817 if (o1 == o2) {
2818 stl(a2, env->regs[(r1 + 1) & 15]);
2819 if (env->regs[r2] & 0x3) {
2820 /* flush TLB / ALB */
2821 tlb_flush(env, 1);
2823 cc = 0;
2824 } else {
2825 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
2826 cc = 1;
2829 return cc;
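/* Move at most 256 bytes between two address spaces (used by MVCS and
   MVCP below).  Both operands are translated page by page; when either
   operand crosses a page boundary the helper recurses to re-translate
   the remainder. */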
2832 static uint32_t mvc_asc(int64_t l, uint64_t a1, uint64_t mode1, uint64_t a2,
2833 uint64_t mode2)
2835 target_ulong src, dest;
2836 int flags, cc = 0, i;
2838 if (!l) {
2839 return 0;
2840 } else if (l > 256) {
2841 /* max 256 */
2842 l = 256;
2843 cc = 3;
2846 if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
2847 cpu_loop_exit(env);
2849 dest |= a1 & ~TARGET_PAGE_MASK;
2851 if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
2852 cpu_loop_exit(env);
2854 src |= a2 & ~TARGET_PAGE_MASK;
2856 /* XXX replace w/ memcpy */
2857 for (i = 0; i < l; i++) {
2858 /* XXX be more clever */
2859 if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
2860 (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
2861 mvc_asc(l - i, a1 + i, mode1, a2 + i, mode2);
2862 break;
2864 stb_phys(dest + i, ldub_phys(src + i));
2867 return cc;
2870 uint32_t HELPER(mvcs)(uint64_t l, uint64_t a1, uint64_t a2)
2872 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2873 __FUNCTION__, l, a1, a2);
2875 return mvc_asc(l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
2878 uint32_t HELPER(mvcp)(uint64_t l, uint64_t a1, uint64_t a2)
2880 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2881 __FUNCTION__, l, a1, a2);
2883 return mvc_asc(l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
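/* SIGNAL PROCESSOR: only a minimal subset of order codes is handled
   here (SENSE, SET ARCHITECTURE, RESTART, STOP); anything else is
   reported back with cc 3. */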
2886 uint32_t HELPER(sigp)(uint64_t order_code, uint32_t r1, uint64_t cpu_addr)
2888 int cc = 0;
2890 HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
2891 __FUNCTION__, order_code, r1, cpu_addr);
2893 /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
2894 as parameter (input). Status (output) is always R1. */
2896 switch (order_code) {
2897 case SIGP_SET_ARCH:
2898 /* switch arch */
2899 break;
2900 case SIGP_SENSE:
2901 /* enumerate CPU status */
2902 if (cpu_addr) {
2903 /* XXX implement when SMP comes */
2904 return 3;
2906 env->regs[r1] &= 0xffffffff00000000ULL;
2907 cc = 1;
2908 break;
2909 #if !defined (CONFIG_USER_ONLY)
2910 case SIGP_RESTART:
2911 qemu_system_reset_request();
2912 cpu_loop_exit(env);
2913 break;
2914 case SIGP_STOP:
2915 qemu_system_shutdown_request();
2916 cpu_loop_exit(env);
2917 break;
2918 #endif
2919 default:
2920 /* unknown sigp */
2921 fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
2922 cc = 3;
2925 return cc;
2928 void HELPER(sacf)(uint64_t a1)
2930 HELPER_LOG("%s: %16" PRIx64 "\n", __FUNCTION__, a1);
2932 switch (a1 & 0xf00) {
2933 case 0x000:
2934 env->psw.mask &= ~PSW_MASK_ASC;
2935 env->psw.mask |= PSW_ASC_PRIMARY;
2936 break;
2937 case 0x100:
2938 env->psw.mask &= ~PSW_MASK_ASC;
2939 env->psw.mask |= PSW_ASC_SECONDARY;
2940 break;
2941 case 0x300:
2942 env->psw.mask &= ~PSW_MASK_ASC;
2943 env->psw.mask |= PSW_ASC_HOME;
2944 break;
2945 default:
2946 qemu_log("unknown sacf mode: %" PRIx64 "\n", a1);
2947 program_interrupt(env, PGM_SPECIFICATION, 2);
2948 break;
2952 /* invalidate pte */
2953 void HELPER(ipte)(uint64_t pte_addr, uint64_t vaddr)
2955 uint64_t page = vaddr & TARGET_PAGE_MASK;
2956 uint64_t pte = 0;
2958 /* XXX broadcast to other CPUs */
2960 /* XXX Linux is nice enough to give us the exact pte address.
2961 According to spec we'd have to find it out ourselves */
2962 /* XXX Linux is fine with overwriting the pte, the spec requires
2963 us to only set the invalid bit */
2964 stq_phys(pte_addr, pte | _PAGE_INVALID);
2966 /* XXX we exploit the fact that Linux passes the exact virtual
2967 address here - it's not obliged to! */
2968 tlb_flush_page(env, page);
2970 /* XXX 31-bit hack */
2971 if (page & 0x80000000) {
2972 tlb_flush_page(env, page & ~0x80000000);
2973 } else {
2974 tlb_flush_page(env, page | 0x80000000);
2978 /* flush local tlb */
2979 void HELPER(ptlb)(void)
2981 tlb_flush(env, 1);
2984 /* store using real address */
2985 void HELPER(stura)(uint64_t addr, uint32_t v1)
2987 stl_phys(get_address(0, 0, addr), v1); /* STURA stores a full 32-bit word */
2990 /* load real address */
2991 uint32_t HELPER(lra)(uint64_t addr, uint32_t r1)
2993 uint32_t cc = 0;
2994 int old_exc = env->exception_index;
2995 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
2996 uint64_t ret;
2997 int flags;
2999 /* XXX incomplete - has more corner cases */
3000 if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
3001 program_interrupt(env, PGM_SPECIAL_OP, 2);
3004 env->exception_index = old_exc;
3005 if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
3006 cc = 3;
3008 if (env->exception_index == EXCP_PGM) {
3009 ret = env->int_pgm_code | 0x80000000;
3010 } else {
3011 ret |= addr & ~TARGET_PAGE_MASK;
3013 env->exception_index = old_exc;
3015 if (!(env->psw.mask & PSW_MASK_64)) {
3016 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (ret & 0xffffffffULL);
3017 } else {
3018 env->regs[r1] = ret;
3021 return cc;
3024 #endif