1 /*
2 * S/390 helper routines
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "cpu.h"
22 #include "dyngen-exec.h"
23 #include "host-utils.h"
24 #include "helpers.h"
25 #include <string.h>
26 #include "kvm.h"
27 #include "qemu-timer.h"
28 #ifdef CONFIG_KVM
29 #include <linux/kvm.h>
30 #endif
32 #if !defined (CONFIG_USER_ONLY)
33 #include "sysemu.h"
34 #endif
36 /*****************************************************************************/
37 /* Softmmu support */
38 #if !defined (CONFIG_USER_ONLY)
39 #include "softmmu_exec.h"
41 #define MMUSUFFIX _mmu
43 #define SHIFT 0
44 #include "softmmu_template.h"
46 #define SHIFT 1
47 #include "softmmu_template.h"
49 #define SHIFT 2
50 #include "softmmu_template.h"
52 #define SHIFT 3
53 #include "softmmu_template.h"
55 /* try to fill the TLB and raise an exception on error. If retaddr is
56 NULL, the function was called from C code (i.e. not from generated
57 code or from helper.c) */
58 /* XXX: fix it to restore all registers */
59 void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
60 void *retaddr)
62 TranslationBlock *tb;
63 CPUState *saved_env;
64 unsigned long pc;
65 int ret;
67 saved_env = env;
68 env = env1;
69 ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx);
70 if (unlikely(ret != 0)) {
71 if (likely(retaddr)) {
72 /* now we have a real cpu fault */
73 pc = (unsigned long)retaddr;
74 tb = tb_find_pc(pc);
75 if (likely(tb)) {
76 /* the PC is inside the translated code. It means that we have
77 a virtual CPU fault */
78 cpu_restore_state(tb, env, pc);
81 cpu_loop_exit(env);
83 env = saved_env;
86 #endif
88 /* #define DEBUG_HELPER */
89 #ifdef DEBUG_HELPER
90 #define HELPER_LOG(x...) qemu_log(x)
91 #else
92 #define HELPER_LOG(x...)
93 #endif
95 /* raise an exception */
96 void HELPER(exception)(uint32_t excp)
98 HELPER_LOG("%s: exception %d\n", __FUNCTION__, excp);
99 env->exception_index = excp;
100 cpu_loop_exit(env);
103 #ifndef CONFIG_USER_ONLY
104 static void mvc_fast_memset(CPUState *env, uint32_t l, uint64_t dest,
105 uint8_t byte)
107 target_phys_addr_t dest_phys;
108 target_phys_addr_t len = l;
109 void *dest_p;
110 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
111 int flags;
113 if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
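/* translation failed: redo the access as a normal store so the softmmu
   slow path raises the proper exception; the cpu_abort below should
   therefore be unreachable (same pattern in mvc_fast_memmove below) */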
114 stb(dest, byte);
115 cpu_abort(env, "should never reach here");
117 dest_phys |= dest & ~TARGET_PAGE_MASK;
119 dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
121 memset(dest_p, byte, len);
123 cpu_physical_memory_unmap(dest_p, 1, len, len);
126 static void mvc_fast_memmove(CPUState *env, uint32_t l, uint64_t dest,
127 uint64_t src)
129 target_phys_addr_t dest_phys;
130 target_phys_addr_t src_phys;
131 target_phys_addr_t len = l;
132 void *dest_p;
133 void *src_p;
134 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
135 int flags;
137 if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
138 stb(dest, 0);
139 cpu_abort(env, "should never reach here");
141 dest_phys |= dest & ~TARGET_PAGE_MASK;
143 if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
144 ldub(src);
145 cpu_abort(env, "should never reach here");
147 src_phys |= src & ~TARGET_PAGE_MASK;
149 dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
150 src_p = cpu_physical_memory_map(src_phys, &len, 0);
152 memmove(dest_p, src_p, len);
154 cpu_physical_memory_unmap(dest_p, 1, len, len);
155 cpu_physical_memory_unmap(src_p, 0, len, len);
157 #endif
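/* Note on the storage-to-storage helpers below (NC/XC/OC/MVC/CLC): the l
   operand is the length code taken from the instruction, i.e. operand
   length minus one, which is why the loops run with i <= l and the fast
   paths operate on l + 1 bytes. */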
159 /* and on array */
160 uint32_t HELPER(nc)(uint32_t l, uint64_t dest, uint64_t src)
162 int i;
163 unsigned char x;
164 uint32_t cc = 0;
166 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
167 __FUNCTION__, l, dest, src);
168 for (i = 0; i <= l; i++) {
169 x = ldub(dest + i) & ldub(src + i);
170 if (x) {
171 cc = 1;
173 stb(dest + i, x);
175 return cc;
178 /* xor on array */
179 uint32_t HELPER(xc)(uint32_t l, uint64_t dest, uint64_t src)
181 int i;
182 unsigned char x;
183 uint32_t cc = 0;
185 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
186 __FUNCTION__, l, dest, src);
188 #ifndef CONFIG_USER_ONLY
189 /* xor with itself is the same as memset(0) */
190 if ((l > 32) && (src == dest) &&
191 (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
192 mvc_fast_memset(env, l + 1, dest, 0);
193 return 0;
195 #else
196 if (src == dest) {
197 memset(g2h(dest), 0, l + 1);
198 return 0;
200 #endif
202 for (i = 0; i <= l; i++) {
203 x = ldub(dest + i) ^ ldub(src + i);
204 if (x) {
205 cc = 1;
207 stb(dest + i, x);
209 return cc;
212 /* or on array */
213 uint32_t HELPER(oc)(uint32_t l, uint64_t dest, uint64_t src)
215 int i;
216 unsigned char x;
217 uint32_t cc = 0;
219 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
220 __FUNCTION__, l, dest, src);
221 for (i = 0; i <= l; i++) {
222 x = ldub(dest + i) | ldub(src + i);
223 if (x) {
224 cc = 1;
226 stb(dest + i, x);
228 return cc;
231 /* memmove */
232 void HELPER(mvc)(uint32_t l, uint64_t dest, uint64_t src)
234 int i = 0;
235 int x = 0;
236 uint32_t l_64 = (l + 1) / 8;
238 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
239 __FUNCTION__, l, dest, src);
241 #ifndef CONFIG_USER_ONLY
242 if ((l > 32) &&
243 (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
244 (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
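/* dest == src + 1 is the classic overlapping-MVC idiom that propagates a
   single byte through the destination, i.e. a memset with ldub(src) as
   the fill byte */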
245 if (dest == (src + 1)) {
246 mvc_fast_memset(env, l + 1, dest, ldub(src));
247 return;
248 } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
249 mvc_fast_memmove(env, l + 1, dest, src);
250 return;
253 #else
254 if (dest == (src + 1)) {
255 memset(g2h(dest), ldub(src), l + 1);
256 return;
257 } else {
258 memmove(g2h(dest), g2h(src), l + 1);
259 return;
261 #endif
263 /* handle the parts that fit into 8-byte loads/stores */
264 if (dest != (src + 1)) {
265 for (i = 0; i < l_64; i++) {
266 stq(dest + x, ldq(src + x));
267 x += 8;
271 /* slow version crossing pages with byte accesses */
272 for (i = x; i <= l; i++) {
273 stb(dest + i, ldub(src + i));
277 /* compare unsigned byte arrays */
278 uint32_t HELPER(clc)(uint32_t l, uint64_t s1, uint64_t s2)
280 int i;
281 unsigned char x, y;
282 uint32_t cc;
283 HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
284 __FUNCTION__, l, s1, s2);
285 for (i = 0; i <= l; i++) {
286 x = ldub(s1 + i);
287 y = ldub(s2 + i);
288 HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
289 if (x < y) {
290 cc = 1;
291 goto done;
292 } else if (x > y) {
293 cc = 2;
294 goto done;
297 cc = 0;
298 done:
299 HELPER_LOG("\n");
300 return cc;
303 /* compare logical under mask */
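/* The 4-bit mask selects, from left to right, which bytes of the low word
   of r1 take part; each selected byte is compared against the next byte at
   addr, and the first inequality decides the condition code. */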
304 uint32_t HELPER(clm)(uint32_t r1, uint32_t mask, uint64_t addr)
306 uint8_t r, d;
307 uint32_t cc;
308 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __FUNCTION__, r1,
309 mask, addr);
310 cc = 0;
311 while (mask) {
312 if (mask & 8) {
313 d = ldub(addr);
314 r = (r1 & 0xff000000UL) >> 24;
315 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
316 addr);
317 if (r < d) {
318 cc = 1;
319 break;
320 } else if (r > d) {
321 cc = 2;
322 break;
324 addr++;
326 mask = (mask << 1) & 0xf;
327 r1 <<= 8;
329 HELPER_LOG("\n");
330 return cc;
333 /* store character under mask */
334 void HELPER(stcm)(uint32_t r1, uint32_t mask, uint64_t addr)
336 uint8_t r;
337 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__, r1, mask,
338 addr);
339 while (mask) {
340 if (mask & 8) {
341 r = (r1 & 0xff000000UL) >> 24;
342 stb(addr, r);
343 HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask, r, addr);
344 addr++;
346 mask = (mask << 1) & 0xf;
347 r1 <<= 8;
349 HELPER_LOG("\n");
352 /* 64/64 -> 128 unsigned multiplication */
353 void HELPER(mlg)(uint32_t r1, uint64_t v2)
355 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
356 /* assuming 64-bit hosts have __uint128_t */
357 __uint128_t res = (__uint128_t)env->regs[r1 + 1];
358 res *= (__uint128_t)v2;
359 env->regs[r1] = (uint64_t)(res >> 64);
360 env->regs[r1 + 1] = (uint64_t)res;
361 #else
362 mulu64(&env->regs[r1 + 1], &env->regs[r1], env->regs[r1 + 1], v2);
363 #endif
366 /* 128 -> 64/64 unsigned division */
367 void HELPER(dlg)(uint32_t r1, uint64_t v2)
369 uint64_t divisor = v2;
371 if (!env->regs[r1]) {
372 /* 64 -> 64/64 case */
373 env->regs[r1] = env->regs[r1+1] % divisor;
374 env->regs[r1+1] = env->regs[r1+1] / divisor;
375 return;
376 } else {
378 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
379 /* assuming 64-bit hosts have __uint128_t */
380 __uint128_t dividend = (((__uint128_t)env->regs[r1]) << 64) |
381 (env->regs[r1+1]);
382 __uint128_t quotient = dividend / divisor;
383 env->regs[r1+1] = quotient;
384 __uint128_t remainder = dividend % divisor;
385 env->regs[r1] = remainder;
386 #else
387 /* 32-bit hosts would need special wrapper functionality - just abort if
388 we encounter such a case; it's very unlikely anyway. */
389 cpu_abort(env, "128 -> 64/64 division not implemented\n");
390 #endif
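/* Effective address computation: base register b2 plus index register x2
   plus displacement d2, where register number 0 means "no register"; the
   result is truncated to 31 bits when not in 64-bit addressing mode. */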
394 static inline uint64_t get_address(int x2, int b2, int d2)
396 uint64_t r = d2;
398 if (x2) {
399 r += env->regs[x2];
402 if (b2) {
403 r += env->regs[b2];
406 /* 31-Bit mode */
407 if (!(env->psw.mask & PSW_MASK_64)) {
408 r &= 0x7fffffff;
411 return r;
414 static inline uint64_t get_address_31fix(int reg)
416 uint64_t r = env->regs[reg];
418 /* 31-Bit mode */
419 if (!(env->psw.mask & PSW_MASK_64)) {
420 r &= 0x7fffffff;
423 return r;
426 /* search string (c is byte to search, r2 is string, r1 end of string) */
427 uint32_t HELPER(srst)(uint32_t c, uint32_t r1, uint32_t r2)
429 uint64_t i;
430 uint32_t cc = 2;
431 uint64_t str = get_address_31fix(r2);
432 uint64_t end = get_address_31fix(r1);
434 HELPER_LOG("%s: c %d *r1 0x%" PRIx64 " *r2 0x%" PRIx64 "\n", __FUNCTION__,
435 c, env->regs[r1], env->regs[r2]);
437 for (i = str; i != end; i++) {
438 if (ldub(i) == c) {
439 env->regs[r1] = i;
440 cc = 1;
441 break;
445 return cc;
448 /* unsigned string compare (c is string terminator) */
449 uint32_t HELPER(clst)(uint32_t c, uint32_t r1, uint32_t r2)
451 uint64_t s1 = get_address_31fix(r1);
452 uint64_t s2 = get_address_31fix(r2);
453 uint8_t v1, v2;
454 uint32_t cc;
455 c = c & 0xff;
456 #ifdef CONFIG_USER_ONLY
457 if (!c) {
458 HELPER_LOG("%s: comparing '%s' and '%s'\n",
459 __FUNCTION__, (char*)g2h(s1), (char*)g2h(s2));
461 #endif
462 for (;;) {
463 v1 = ldub(s1);
464 v2 = ldub(s2);
465 if ((v1 == c || v2 == c) || (v1 != v2)) {
466 break;
468 s1++;
469 s2++;
472 if (v1 == v2) {
473 cc = 0;
474 } else {
475 cc = (v1 < v2) ? 1 : 2;
476 /* FIXME: 31-bit mode! */
477 env->regs[r1] = s1;
478 env->regs[r2] = s2;
480 return cc;
483 /* move page */
484 void HELPER(mvpg)(uint64_t r0, uint64_t r1, uint64_t r2)
486 /* XXX missing r0 handling */
487 #ifdef CONFIG_USER_ONLY
488 int i;
490 for (i = 0; i < TARGET_PAGE_SIZE; i++) {
491 stb(r1 + i, ldub(r2 + i));
493 #else
494 mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
495 #endif
498 /* string copy (c is string terminator) */
499 void HELPER(mvst)(uint32_t c, uint32_t r1, uint32_t r2)
501 uint64_t dest = get_address_31fix(r1);
502 uint64_t src = get_address_31fix(r2);
503 uint8_t v;
504 c = c & 0xff;
505 #ifdef CONFIG_USER_ONLY
506 if (!c) {
507 HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__, (char*)g2h(src),
508 dest);
510 #endif
511 for (;;) {
512 v = ldub(src);
513 stb(dest, v);
514 if (v == c) {
515 break;
517 src++;
518 dest++;
520 env->regs[r1] = dest; /* FIXME: 31-bit mode! */
523 /* compare and swap 64-bit */
524 uint32_t HELPER(csg)(uint32_t r1, uint64_t a2, uint32_t r3)
526 /* FIXME: locking? */
527 uint32_t cc;
528 uint64_t v2 = ldq(a2);
529 if (env->regs[r1] == v2) {
530 cc = 0;
531 stq(a2, env->regs[r3]);
532 } else {
533 cc = 1;
534 env->regs[r1] = v2;
536 return cc;
539 /* compare double and swap 64-bit */
540 uint32_t HELPER(cdsg)(uint32_t r1, uint64_t a2, uint32_t r3)
542 /* FIXME: locking? */
543 uint32_t cc;
544 uint64_t v2_hi = ldq(a2);
545 uint64_t v2_lo = ldq(a2 + 8);
546 uint64_t v1_hi = env->regs[r1];
547 uint64_t v1_lo = env->regs[r1 + 1];
549 if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) {
550 cc = 0;
551 stq(a2, env->regs[r3]);
552 stq(a2 + 8, env->regs[r3 + 1]);
553 } else {
554 cc = 1;
555 env->regs[r1] = v2_hi;
556 env->regs[r1 + 1] = v2_lo;
559 return cc;
562 /* compare and swap 32-bit */
563 uint32_t HELPER(cs)(uint32_t r1, uint64_t a2, uint32_t r3)
565 /* FIXME: locking? */
566 uint32_t cc;
567 HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__, r1, a2, r3);
568 uint32_t v2 = ldl(a2);
569 if (((uint32_t)env->regs[r1]) == v2) {
570 cc = 0;
571 stl(a2, (uint32_t)env->regs[r3]);
572 } else {
573 cc = 1;
574 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | v2;
576 return cc;
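/* insert character under mask: load successive bytes from address into the
   bytes of the low word of r1 selected by mask (left to right); cc is 0 if
   all inserted bits are zero, 1 if the leftmost inserted bit is one, and 2
   otherwise */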
579 static uint32_t helper_icm(uint32_t r1, uint64_t address, uint32_t mask)
581 int pos = 24; /* top of the lower half of r1 */
582 uint64_t rmask = 0xff000000ULL;
583 uint8_t val = 0;
584 int ccd = 0;
585 uint32_t cc = 0;
587 while (mask) {
588 if (mask & 8) {
589 env->regs[r1] &= ~rmask;
590 val = ldub(address);
591 if ((val & 0x80) && !ccd) {
592 cc = 1;
594 ccd = 1;
595 if (val && cc == 0) {
596 cc = 2;
598 env->regs[r1] |= (uint64_t)val << pos;
599 address++;
601 mask = (mask << 1) & 0xf;
602 pos -= 8;
603 rmask >>= 8;
606 return cc;
609 /* execute instruction
610 this instruction executes an insn modified with the contents of r1
611 it does not change the executed instruction in memory
612 it does not change the program counter
613 in other words: tricky...
614 currently implemented by interpreting the cases it is most commonly used in
616 uint32_t HELPER(ex)(uint32_t cc, uint64_t v1, uint64_t addr, uint64_t ret)
618 uint16_t insn = lduw_code(addr);
619 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__, v1, addr,
620 insn);
621 if ((insn & 0xf0ff) == 0xd000) {
622 uint32_t l, insn2, b1, b2, d1, d2;
623 l = v1 & 0xff;
624 insn2 = ldl_code(addr + 2);
625 b1 = (insn2 >> 28) & 0xf;
626 b2 = (insn2 >> 12) & 0xf;
627 d1 = (insn2 >> 16) & 0xfff;
628 d2 = insn2 & 0xfff;
629 switch (insn & 0xf00) {
630 case 0x200:
631 helper_mvc(l, get_address(0, b1, d1), get_address(0, b2, d2));
632 break;
633 case 0x500:
634 cc = helper_clc(l, get_address(0, b1, d1), get_address(0, b2, d2));
635 break;
636 case 0x700:
637 cc = helper_xc(l, get_address(0, b1, d1), get_address(0, b2, d2));
638 break;
639 default:
640 goto abort;
641 break;
643 } else if ((insn & 0xff00) == 0x0a00) {
644 /* supervisor call */
645 HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__, (insn|v1) & 0xff);
646 env->psw.addr = ret - 4;
647 env->int_svc_code = (insn|v1) & 0xff;
648 env->int_svc_ilc = 4;
649 helper_exception(EXCP_SVC);
650 } else if ((insn & 0xff00) == 0xbf00) {
651 uint32_t insn2, r1, r3, b2, d2;
652 insn2 = ldl_code(addr + 2);
653 r1 = (insn2 >> 20) & 0xf;
654 r3 = (insn2 >> 16) & 0xf;
655 b2 = (insn2 >> 12) & 0xf;
656 d2 = insn2 & 0xfff;
657 cc = helper_icm(r1, get_address(0, b2, d2), r3);
658 } else {
659 abort:
660 cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
661 insn);
663 return cc;
666 /* absolute value 32-bit */
667 uint32_t HELPER(abs_i32)(int32_t val)
669 if (val < 0) {
670 return -val;
671 } else {
672 return val;
676 /* negative absolute value 32-bit */
677 int32_t HELPER(nabs_i32)(int32_t val)
679 if (val < 0) {
680 return val;
681 } else {
682 return -val;
686 /* absolute value 64-bit */
687 uint64_t HELPER(abs_i64)(int64_t val)
689 HELPER_LOG("%s: val 0x%" PRIx64 "\n", __FUNCTION__, val);
691 if (val < 0) {
692 return -val;
693 } else {
694 return val;
698 /* negative absolute value 64-bit */
699 int64_t HELPER(nabs_i64)(int64_t val)
701 if (val < 0) {
702 return val;
703 } else {
704 return -val;
708 /* add with carry 32-bit unsigned */
709 uint32_t HELPER(addc_u32)(uint32_t cc, uint32_t v1, uint32_t v2)
711 uint32_t res;
713 res = v1 + v2;
714 if (cc & 2) {
715 res++;
718 return res;
721 /* store character under mask high; same as stcm, but operates on the upper half of r1 */
722 void HELPER(stcmh)(uint32_t r1, uint64_t address, uint32_t mask)
724 int pos = 56; /* top of the upper half of r1 */
726 while (mask) {
727 if (mask & 8) {
728 stb(address, (env->regs[r1] >> pos) & 0xff);
729 address++;
731 mask = (mask << 1) & 0xf;
732 pos -= 8;
736 /* insert character under mask high; same as icm, but operates on the
737 upper half of r1 */
738 uint32_t HELPER(icmh)(uint32_t r1, uint64_t address, uint32_t mask)
740 int pos = 56; /* top of the upper half of r1 */
741 uint64_t rmask = 0xff00000000000000ULL;
742 uint8_t val = 0;
743 int ccd = 0;
744 uint32_t cc = 0;
746 while (mask) {
747 if (mask & 8) {
748 env->regs[r1] &= ~rmask;
749 val = ldub(address);
750 if ((val & 0x80) && !ccd) {
751 cc = 1;
753 ccd = 1;
754 if (val && cc == 0) {
755 cc = 2;
757 env->regs[r1] |= (uint64_t)val << pos;
758 address++;
760 mask = (mask << 1) & 0xf;
761 pos -= 8;
762 rmask >>= 8;
765 return cc;
768 /* insert psw mask and condition code into r1 */
769 void HELPER(ipm)(uint32_t cc, uint32_t r1)
771 uint64_t r = env->regs[r1];
773 r &= 0xffffffff00ffffffULL;
774 r |= (cc << 28) | (((env->psw.mask >> 40) & 0xf) << 24);
775 env->regs[r1] = r;
776 HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__,
777 cc, env->psw.mask, r);
780 /* load access registers r1 to r3 from memory at a2 */
781 void HELPER(lam)(uint32_t r1, uint64_t a2, uint32_t r3)
783 int i;
785 for (i = r1;; i = (i + 1) % 16) {
786 env->aregs[i] = ldl(a2);
787 a2 += 4;
789 if (i == r3) {
790 break;
795 /* store access registers r1 to r3 in memory at a2 */
796 void HELPER(stam)(uint32_t r1, uint64_t a2, uint32_t r3)
798 int i;
800 for (i = r1;; i = (i + 1) % 16) {
801 stl(a2, env->aregs[i]);
802 a2 += 4;
804 if (i == r3) {
805 break;
810 /* move long */
811 uint32_t HELPER(mvcl)(uint32_t r1, uint32_t r2)
813 uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
814 uint64_t dest = get_address_31fix(r1);
815 uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
816 uint64_t src = get_address_31fix(r2);
817 uint8_t pad = src >> 24;
818 uint8_t v;
819 uint32_t cc;
821 if (destlen == srclen) {
822 cc = 0;
823 } else if (destlen < srclen) {
824 cc = 1;
825 } else {
826 cc = 2;
829 if (srclen > destlen) {
830 srclen = destlen;
833 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
834 v = ldub(src);
835 stb(dest, v);
838 for (; destlen; dest++, destlen--) {
839 stb(dest, pad);
842 env->regs[r1 + 1] = destlen;
843 /* can't use srclen here, we trunc'ed it */
844 env->regs[r2 + 1] -= src - env->regs[r2];
845 env->regs[r1] = dest;
846 env->regs[r2] = src;
848 return cc;
851 /* move long extended: another memcopy insn with more bells and whistles */
852 uint32_t HELPER(mvcle)(uint32_t r1, uint64_t a2, uint32_t r3)
854 uint64_t destlen = env->regs[r1 + 1];
855 uint64_t dest = env->regs[r1];
856 uint64_t srclen = env->regs[r3 + 1];
857 uint64_t src = env->regs[r3];
858 uint8_t pad = a2 & 0xff;
859 uint8_t v;
860 uint32_t cc;
862 if (!(env->psw.mask & PSW_MASK_64)) {
863 destlen = (uint32_t)destlen;
864 srclen = (uint32_t)srclen;
865 dest &= 0x7fffffff;
866 src &= 0x7fffffff;
869 if (destlen == srclen) {
870 cc = 0;
871 } else if (destlen < srclen) {
872 cc = 1;
873 } else {
874 cc = 2;
877 if (srclen > destlen) {
878 srclen = destlen;
881 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
882 v = ldub(src);
883 stb(dest, v);
886 for (; destlen; dest++, destlen--) {
887 stb(dest, pad);
890 env->regs[r1 + 1] = destlen;
891 /* can't use srclen here, we trunc'ed it */
892 /* FIXME: 31-bit mode! */
893 env->regs[r3 + 1] -= src - env->regs[r3];
894 env->regs[r1] = dest;
895 env->regs[r3] = src;
897 return cc;
900 /* compare logical long extended: memcompare insn with padding */
901 uint32_t HELPER(clcle)(uint32_t r1, uint64_t a2, uint32_t r3)
903 uint64_t destlen = env->regs[r1 + 1];
904 uint64_t dest = get_address_31fix(r1);
905 uint64_t srclen = env->regs[r3 + 1];
906 uint64_t src = get_address_31fix(r3);
907 uint8_t pad = a2 & 0xff;
908 uint8_t v1 = 0, v2 = 0;
909 uint32_t cc = 0;
911 if (!(destlen || srclen)) {
912 return cc;
915 if (srclen > destlen) {
916 srclen = destlen;
919 for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
920 v1 = srclen ? ldub(src) : pad;
921 v2 = destlen ? ldub(dest) : pad;
922 if (v1 != v2) {
923 cc = (v1 < v2) ? 1 : 2;
924 break;
928 env->regs[r1 + 1] = destlen;
929 /* can't use srclen here, we trunc'ed it */
930 env->regs[r3 + 1] -= src - env->regs[r3];
931 env->regs[r1] = dest;
932 env->regs[r3] = src;
934 return cc;
937 /* subtract unsigned v2 from v1 with borrow */
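/* v1 + ~v2 + carry-in is the two's-complement form of v1 - v2 - borrow;
   bit 1 of the incoming cc carries the borrow state left behind by the
   previous subtract-logical instruction */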
938 uint32_t HELPER(slb)(uint32_t cc, uint32_t r1, uint32_t v2)
940 uint32_t v1 = env->regs[r1];
941 uint32_t res = v1 + (~v2) + (cc >> 1);
943 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | res;
944 if (cc & 2) {
945 /* borrow */
946 return v1 ? 1 : 0;
947 } else {
948 return v1 ? 3 : 2;
952 /* subtract unsigned v2 from v1 with borrow */
953 uint32_t HELPER(slbg)(uint32_t cc, uint32_t r1, uint64_t v1, uint64_t v2)
955 uint64_t res = v1 + (~v2) + (cc >> 1);
957 env->regs[r1] = res;
958 if (cc & 2) {
959 /* borrow */
960 return v1 ? 1 : 0;
961 } else {
962 return v1 ? 3 : 2;
966 static inline int float_comp_to_cc(int float_compare)
968 switch (float_compare) {
969 case float_relation_equal:
970 return 0;
971 case float_relation_less:
972 return 1;
973 case float_relation_greater:
974 return 2;
975 case float_relation_unordered:
976 return 3;
977 default:
978 cpu_abort(env, "unknown return value for float compare\n");
982 /* condition codes for binary FP ops */
983 static uint32_t set_cc_f32(float32 v1, float32 v2)
985 return float_comp_to_cc(float32_compare_quiet(v1, v2, &env->fpu_status));
988 static uint32_t set_cc_f64(float64 v1, float64 v2)
990 return float_comp_to_cc(float64_compare_quiet(v1, v2, &env->fpu_status));
993 /* condition codes for unary FP ops */
994 static uint32_t set_cc_nz_f32(float32 v)
996 if (float32_is_any_nan(v)) {
997 return 3;
998 } else if (float32_is_zero(v)) {
999 return 0;
1000 } else if (float32_is_neg(v)) {
1001 return 1;
1002 } else {
1003 return 2;
1007 static uint32_t set_cc_nz_f64(float64 v)
1009 if (float64_is_any_nan(v)) {
1010 return 3;
1011 } else if (float64_is_zero(v)) {
1012 return 0;
1013 } else if (float64_is_neg(v)) {
1014 return 1;
1015 } else {
1016 return 2;
1020 static uint32_t set_cc_nz_f128(float128 v)
1022 if (float128_is_any_nan(v)) {
1023 return 3;
1024 } else if (float128_is_zero(v)) {
1025 return 0;
1026 } else if (float128_is_neg(v)) {
1027 return 1;
1028 } else {
1029 return 2;
1033 /* convert 32-bit int to 64-bit float */
1034 void HELPER(cdfbr)(uint32_t f1, int32_t v2)
1036 HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__, v2, f1);
1037 env->fregs[f1].d = int32_to_float64(v2, &env->fpu_status);
1040 /* convert 32-bit int to 128-bit float */
1041 void HELPER(cxfbr)(uint32_t f1, int32_t v2)
1043 CPU_QuadU v1;
1044 v1.q = int32_to_float128(v2, &env->fpu_status);
1045 env->fregs[f1].ll = v1.ll.upper;
1046 env->fregs[f1 + 2].ll = v1.ll.lower;
1049 /* convert 64-bit int to 32-bit float */
1050 void HELPER(cegbr)(uint32_t f1, int64_t v2)
1052 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
1053 env->fregs[f1].l.upper = int64_to_float32(v2, &env->fpu_status);
1056 /* convert 64-bit int to 64-bit float */
1057 void HELPER(cdgbr)(uint32_t f1, int64_t v2)
1059 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
1060 env->fregs[f1].d = int64_to_float64(v2, &env->fpu_status);
1063 /* convert 64-bit int to 128-bit float */
1064 void HELPER(cxgbr)(uint32_t f1, int64_t v2)
1066 CPU_QuadU x1;
1067 x1.q = int64_to_float128(v2, &env->fpu_status);
1068 HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__, v2,
1069 x1.ll.upper, x1.ll.lower);
1070 env->fregs[f1].ll = x1.ll.upper;
1071 env->fregs[f1 + 2].ll = x1.ll.lower;
1074 /* convert 32-bit int to 32-bit float */
1075 void HELPER(cefbr)(uint32_t f1, int32_t v2)
1077 env->fregs[f1].l.upper = int32_to_float32(v2, &env->fpu_status);
1078 HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__, v2,
1079 env->fregs[f1].l.upper, f1);
1082 /* 32-bit FP addition RR */
1083 uint32_t HELPER(aebr)(uint32_t f1, uint32_t f2)
1085 env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
1086 env->fregs[f2].l.upper,
1087 &env->fpu_status);
1088 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
1089 env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
1091 return set_cc_nz_f32(env->fregs[f1].l.upper);
1094 /* 64-bit FP addition RR */
1095 uint32_t HELPER(adbr)(uint32_t f1, uint32_t f2)
1097 env->fregs[f1].d = float64_add(env->fregs[f1].d, env->fregs[f2].d,
1098 &env->fpu_status);
1099 HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__,
1100 env->fregs[f2].d, env->fregs[f1].d, f1);
1102 return set_cc_nz_f64(env->fregs[f1].d);
1105 /* 32-bit FP subtraction RR */
1106 uint32_t HELPER(sebr)(uint32_t f1, uint32_t f2)
1108 env->fregs[f1].l.upper = float32_sub(env->fregs[f1].l.upper,
1109 env->fregs[f2].l.upper,
1110 &env->fpu_status);
1111 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
1112 env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
1114 return set_cc_nz_f32(env->fregs[f1].l.upper);
1117 /* 64-bit FP subtraction RR */
1118 uint32_t HELPER(sdbr)(uint32_t f1, uint32_t f2)
1120 env->fregs[f1].d = float64_sub(env->fregs[f1].d, env->fregs[f2].d,
1121 &env->fpu_status);
1122 HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
1123 __FUNCTION__, env->fregs[f2].d, env->fregs[f1].d, f1);
1125 return set_cc_nz_f64(env->fregs[f1].d);
1128 /* 32-bit FP division RR */
1129 void HELPER(debr)(uint32_t f1, uint32_t f2)
1131 env->fregs[f1].l.upper = float32_div(env->fregs[f1].l.upper,
1132 env->fregs[f2].l.upper,
1133 &env->fpu_status);
1136 /* 128-bit FP division RR */
1137 void HELPER(dxbr)(uint32_t f1, uint32_t f2)
1139 CPU_QuadU v1;
1140 v1.ll.upper = env->fregs[f1].ll;
1141 v1.ll.lower = env->fregs[f1 + 2].ll;
1142 CPU_QuadU v2;
1143 v2.ll.upper = env->fregs[f2].ll;
1144 v2.ll.lower = env->fregs[f2 + 2].ll;
1145 CPU_QuadU res;
1146 res.q = float128_div(v1.q, v2.q, &env->fpu_status);
1147 env->fregs[f1].ll = res.ll.upper;
1148 env->fregs[f1 + 2].ll = res.ll.lower;
1151 /* 64-bit FP multiplication RR */
1152 void HELPER(mdbr)(uint32_t f1, uint32_t f2)
1154 env->fregs[f1].d = float64_mul(env->fregs[f1].d, env->fregs[f2].d,
1155 &env->fpu_status);
1158 /* 128-bit FP multiplication RR */
1159 void HELPER(mxbr)(uint32_t f1, uint32_t f2)
1161 CPU_QuadU v1;
1162 v1.ll.upper = env->fregs[f1].ll;
1163 v1.ll.lower = env->fregs[f1 + 2].ll;
1164 CPU_QuadU v2;
1165 v2.ll.upper = env->fregs[f2].ll;
1166 v2.ll.lower = env->fregs[f2 + 2].ll;
1167 CPU_QuadU res;
1168 res.q = float128_mul(v1.q, v2.q, &env->fpu_status);
1169 env->fregs[f1].ll = res.ll.upper;
1170 env->fregs[f1 + 2].ll = res.ll.lower;
1173 /* convert 32-bit float to 64-bit float */
1174 void HELPER(ldebr)(uint32_t r1, uint32_t r2)
1176 env->fregs[r1].d = float32_to_float64(env->fregs[r2].l.upper,
1177 &env->fpu_status);
1180 /* convert 128-bit float to 64-bit float */
1181 void HELPER(ldxbr)(uint32_t f1, uint32_t f2)
1183 CPU_QuadU x2;
1184 x2.ll.upper = env->fregs[f2].ll;
1185 x2.ll.lower = env->fregs[f2 + 2].ll;
1186 env->fregs[f1].d = float128_to_float64(x2.q, &env->fpu_status);
1187 HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__, env->fregs[f1].d);
1190 /* convert 64-bit float to 128-bit float */
1191 void HELPER(lxdbr)(uint32_t f1, uint32_t f2)
1193 CPU_QuadU res;
1194 res.q = float64_to_float128(env->fregs[f2].d, &env->fpu_status);
1195 env->fregs[f1].ll = res.ll.upper;
1196 env->fregs[f1 + 2].ll = res.ll.lower;
1199 /* convert 64-bit float to 32-bit float */
1200 void HELPER(ledbr)(uint32_t f1, uint32_t f2)
1202 float64 d2 = env->fregs[f2].d;
1203 env->fregs[f1].l.upper = float64_to_float32(d2, &env->fpu_status);
1206 /* convert 128-bit float to 32-bit float */
1207 void HELPER(lexbr)(uint32_t f1, uint32_t f2)
1209 CPU_QuadU x2;
1210 x2.ll.upper = env->fregs[f2].ll;
1211 x2.ll.lower = env->fregs[f2 + 2].ll;
1212 env->fregs[f1].l.upper = float128_to_float32(x2.q, &env->fpu_status);
1213 HELPER_LOG("%s: to 0x%d\n", __FUNCTION__, env->fregs[f1].l.upper);
1216 /* absolute value of 32-bit float */
1217 uint32_t HELPER(lpebr)(uint32_t f1, uint32_t f2)
1219 float32 v1;
1220 float32 v2 = env->fregs[f2].l.upper;
1221 v1 = float32_abs(v2);
1222 env->fregs[f1].l.upper = v1;
1223 return set_cc_nz_f32(v1);
1226 /* absolute value of 64-bit float */
1227 uint32_t HELPER(lpdbr)(uint32_t f1, uint32_t f2)
1229 float64 v1;
1230 float64 v2 = env->fregs[f2].d;
1231 v1 = float64_abs(v2);
1232 env->fregs[f1].d = v1;
1233 return set_cc_nz_f64(v1);
1236 /* absolute value of 128-bit float */
1237 uint32_t HELPER(lpxbr)(uint32_t f1, uint32_t f2)
1239 CPU_QuadU v1;
1240 CPU_QuadU v2;
1241 v2.ll.upper = env->fregs[f2].ll;
1242 v2.ll.lower = env->fregs[f2 + 2].ll;
1243 v1.q = float128_abs(v2.q);
1244 env->fregs[f1].ll = v1.ll.upper;
1245 env->fregs[f1 + 2].ll = v1.ll.lower;
1246 return set_cc_nz_f128(v1.q);
1249 /* load and test 64-bit float */
1250 uint32_t HELPER(ltdbr)(uint32_t f1, uint32_t f2)
1252 env->fregs[f1].d = env->fregs[f2].d;
1253 return set_cc_nz_f64(env->fregs[f1].d);
1256 /* load and test 32-bit float */
1257 uint32_t HELPER(ltebr)(uint32_t f1, uint32_t f2)
1259 env->fregs[f1].l.upper = env->fregs[f2].l.upper;
1260 return set_cc_nz_f32(env->fregs[f1].l.upper);
1263 /* load and test 128-bit float */
1264 uint32_t HELPER(ltxbr)(uint32_t f1, uint32_t f2)
1266 CPU_QuadU x;
1267 x.ll.upper = env->fregs[f2].ll;
1268 x.ll.lower = env->fregs[f2 + 2].ll;
1269 env->fregs[f1].ll = x.ll.upper;
1270 env->fregs[f1 + 2].ll = x.ll.lower;
1271 return set_cc_nz_f128(x.q);
1274 /* load complement of 32-bit float */
1275 uint32_t HELPER(lcebr)(uint32_t f1, uint32_t f2)
1277 env->fregs[f1].l.upper = float32_chs(env->fregs[f2].l.upper);
1279 return set_cc_nz_f32(env->fregs[f1].l.upper);
1282 /* load complement of 64-bit float */
1283 uint32_t HELPER(lcdbr)(uint32_t f1, uint32_t f2)
1285 env->fregs[f1].d = float64_chs(env->fregs[f2].d);
1287 return set_cc_nz_f64(env->fregs[f1].d);
1290 /* load complement of 128-bit float */
1291 uint32_t HELPER(lcxbr)(uint32_t f1, uint32_t f2)
1293 CPU_QuadU x1, x2;
1294 x2.ll.upper = env->fregs[f2].ll;
1295 x2.ll.lower = env->fregs[f2 + 2].ll;
1296 x1.q = float128_chs(x2.q);
1297 env->fregs[f1].ll = x1.ll.upper;
1298 env->fregs[f1 + 2].ll = x1.ll.lower;
1299 return set_cc_nz_f128(x1.q);
1302 /* 32-bit FP addition RM */
1303 void HELPER(aeb)(uint32_t f1, uint32_t val)
1305 float32 v1 = env->fregs[f1].l.upper;
1306 CPU_FloatU v2;
1307 v2.l = val;
1308 HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__,
1309 v1, f1, v2.f);
1310 env->fregs[f1].l.upper = float32_add(v1, v2.f, &env->fpu_status);
1313 /* 32-bit FP division RM */
1314 void HELPER(deb)(uint32_t f1, uint32_t val)
1316 float32 v1 = env->fregs[f1].l.upper;
1317 CPU_FloatU v2;
1318 v2.l = val;
1319 HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__,
1320 v1, f1, v2.f);
1321 env->fregs[f1].l.upper = float32_div(v1, v2.f, &env->fpu_status);
1324 /* 32-bit FP multiplication RM */
1325 void HELPER(meeb)(uint32_t f1, uint32_t val)
1327 float32 v1 = env->fregs[f1].l.upper;
1328 CPU_FloatU v2;
1329 v2.l = val;
1330 HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__,
1331 v1, f1, v2.f);
1332 env->fregs[f1].l.upper = float32_mul(v1, v2.f, &env->fpu_status);
1335 /* 32-bit FP compare RR */
1336 uint32_t HELPER(cebr)(uint32_t f1, uint32_t f2)
1338 float32 v1 = env->fregs[f1].l.upper;
1339 float32 v2 = env->fregs[f2].l.upper;
1340 HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__,
1341 v1, f1, v2);
1342 return set_cc_f32(v1, v2);
1345 /* 64-bit FP compare RR */
1346 uint32_t HELPER(cdbr)(uint32_t f1, uint32_t f2)
1348 float64 v1 = env->fregs[f1].d;
1349 float64 v2 = env->fregs[f2].d;
1350 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__,
1351 v1, f1, v2);
1352 return set_cc_f64(v1, v2);
1355 /* 128-bit FP compare RR */
1356 uint32_t HELPER(cxbr)(uint32_t f1, uint32_t f2)
1358 CPU_QuadU v1;
1359 v1.ll.upper = env->fregs[f1].ll;
1360 v1.ll.lower = env->fregs[f1 + 2].ll;
1361 CPU_QuadU v2;
1362 v2.ll.upper = env->fregs[f2].ll;
1363 v2.ll.lower = env->fregs[f2 + 2].ll;
1365 return float_comp_to_cc(float128_compare_quiet(v1.q, v2.q,
1366 &env->fpu_status));
1369 /* 64-bit FP compare RM */
1370 uint32_t HELPER(cdb)(uint32_t f1, uint64_t a2)
1372 float64 v1 = env->fregs[f1].d;
1373 CPU_DoubleU v2;
1374 v2.ll = ldq(a2);
1375 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__, v1,
1376 f1, v2.d);
1377 return set_cc_f64(v1, v2.d);
1380 /* 64-bit FP addition RM */
1381 uint32_t HELPER(adb)(uint32_t f1, uint64_t a2)
1383 float64 v1 = env->fregs[f1].d;
1384 CPU_DoubleU v2;
1385 v2.ll = ldq(a2);
1386 HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__,
1387 v1, f1, v2.d);
1388 env->fregs[f1].d = v1 = float64_add(v1, v2.d, &env->fpu_status);
1389 return set_cc_nz_f64(v1);
1392 /* 32-bit FP subtraction RM */
1393 void HELPER(seb)(uint32_t f1, uint32_t val)
1395 float32 v1 = env->fregs[f1].l.upper;
1396 CPU_FloatU v2;
1397 v2.l = val;
1398 env->fregs[f1].l.upper = float32_sub(v1, v2.f, &env->fpu_status);
1401 /* 64-bit FP subtraction RM */
1402 uint32_t HELPER(sdb)(uint32_t f1, uint64_t a2)
1404 float64 v1 = env->fregs[f1].d;
1405 CPU_DoubleU v2;
1406 v2.ll = ldq(a2);
1407 env->fregs[f1].d = v1 = float64_sub(v1, v2.d, &env->fpu_status);
1408 return set_cc_nz_f64(v1);
1411 /* 64-bit FP multiplication RM */
1412 void HELPER(mdb)(uint32_t f1, uint64_t a2)
1414 float64 v1 = env->fregs[f1].d;
1415 CPU_DoubleU v2;
1416 v2.ll = ldq(a2);
1417 HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__,
1418 v1, f1, v2.d);
1419 env->fregs[f1].d = float64_mul(v1, v2.d, &env->fpu_status);
1422 /* 64-bit FP division RM */
1423 void HELPER(ddb)(uint32_t f1, uint64_t a2)
1425 float64 v1 = env->fregs[f1].d;
1426 CPU_DoubleU v2;
1427 v2.ll = ldq(a2);
1428 HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__,
1429 v1, f1, v2.d);
1430 env->fregs[f1].d = float64_div(v1, v2.d, &env->fpu_status);
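/* Map the m3 rounding-mode field of the convert instructions onto softfloat
   rounding modes.  m3 = 1 (biased round to nearest) has no exact softfloat
   counterpart here and is handled like m3 = 4 (round to nearest, ties to
   even) as an approximation. */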
1433 static void set_round_mode(int m3)
1435 switch (m3) {
1436 case 0:
1437 /* current mode */
1438 break;
1439 case 1:
1440 /* biased round to nearest */
1441 case 4:
1442 /* round to nearest */
1443 set_float_rounding_mode(float_round_nearest_even, &env->fpu_status);
1444 break;
1445 case 5:
1446 /* round to zero */
1447 set_float_rounding_mode(float_round_to_zero, &env->fpu_status);
1448 break;
1449 case 6:
1450 /* round to +inf */
1451 set_float_rounding_mode(float_round_up, &env->fpu_status);
1452 break;
1453 case 7:
1454 /* round to -inf */
1455 set_float_rounding_mode(float_round_down, &env->fpu_status);
1456 break;
1460 /* convert 32-bit float to 64-bit int */
1461 uint32_t HELPER(cgebr)(uint32_t r1, uint32_t f2, uint32_t m3)
1463 float32 v2 = env->fregs[f2].l.upper;
1464 set_round_mode(m3);
1465 env->regs[r1] = float32_to_int64(v2, &env->fpu_status);
1466 return set_cc_nz_f32(v2);
1469 /* convert 64-bit float to 64-bit int */
1470 uint32_t HELPER(cgdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1472 float64 v2 = env->fregs[f2].d;
1473 set_round_mode(m3);
1474 env->regs[r1] = float64_to_int64(v2, &env->fpu_status);
1475 return set_cc_nz_f64(v2);
1478 /* convert 128-bit float to 64-bit int */
1479 uint32_t HELPER(cgxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1481 CPU_QuadU v2;
1482 v2.ll.upper = env->fregs[f2].ll;
1483 v2.ll.lower = env->fregs[f2 + 2].ll;
1484 set_round_mode(m3);
1485 env->regs[r1] = float128_to_int64(v2.q, &env->fpu_status);
1486 if (float128_is_any_nan(v2.q)) {
1487 return 3;
1488 } else if (float128_is_zero(v2.q)) {
1489 return 0;
1490 } else if (float128_is_neg(v2.q)) {
1491 return 1;
1492 } else {
1493 return 2;
1497 /* convert 32-bit float to 32-bit int */
1498 uint32_t HELPER(cfebr)(uint32_t r1, uint32_t f2, uint32_t m3)
1500 float32 v2 = env->fregs[f2].l.upper;
1501 set_round_mode(m3);
1502 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1503 float32_to_int32(v2, &env->fpu_status);
1504 return set_cc_nz_f32(v2);
1507 /* convert 64-bit float to 32-bit int */
1508 uint32_t HELPER(cfdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1510 float64 v2 = env->fregs[f2].d;
1511 set_round_mode(m3);
1512 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1513 float64_to_int32(v2, &env->fpu_status);
1514 return set_cc_nz_f64(v2);
1517 /* convert 128-bit float to 32-bit int */
1518 uint32_t HELPER(cfxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1520 CPU_QuadU v2;
1521 v2.ll.upper = env->fregs[f2].ll;
1522 v2.ll.lower = env->fregs[f2 + 2].ll;
1523 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1524 float128_to_int32(v2.q, &env->fpu_status);
1525 return set_cc_nz_f128(v2.q);
1528 /* load 32-bit FP zero */
1529 void HELPER(lzer)(uint32_t f1)
1531 env->fregs[f1].l.upper = float32_zero;
1534 /* load 64-bit FP zero */
1535 void HELPER(lzdr)(uint32_t f1)
1537 env->fregs[f1].d = float64_zero;
1540 /* load 128-bit FP zero */
1541 void HELPER(lzxr)(uint32_t f1)
1543 CPU_QuadU x;
1544 x.q = float64_to_float128(float64_zero, &env->fpu_status);
1545 env->fregs[f1].ll = x.ll.upper;
1546 env->fregs[f1 + 2].ll = x.ll.lower;
1549 /* 128-bit FP subtraction RR */
1550 uint32_t HELPER(sxbr)(uint32_t f1, uint32_t f2)
1552 CPU_QuadU v1;
1553 v1.ll.upper = env->fregs[f1].ll;
1554 v1.ll.lower = env->fregs[f1 + 2].ll;
1555 CPU_QuadU v2;
1556 v2.ll.upper = env->fregs[f2].ll;
1557 v2.ll.lower = env->fregs[f2 + 2].ll;
1558 CPU_QuadU res;
1559 res.q = float128_sub(v1.q, v2.q, &env->fpu_status);
1560 env->fregs[f1].ll = res.ll.upper;
1561 env->fregs[f1 + 2].ll = res.ll.lower;
1562 return set_cc_nz_f128(res.q);
1565 /* 128-bit FP addition RR */
1566 uint32_t HELPER(axbr)(uint32_t f1, uint32_t f2)
1568 CPU_QuadU v1;
1569 v1.ll.upper = env->fregs[f1].ll;
1570 v1.ll.lower = env->fregs[f1 + 2].ll;
1571 CPU_QuadU v2;
1572 v2.ll.upper = env->fregs[f2].ll;
1573 v2.ll.lower = env->fregs[f2 + 2].ll;
1574 CPU_QuadU res;
1575 res.q = float128_add(v1.q, v2.q, &env->fpu_status);
1576 env->fregs[f1].ll = res.ll.upper;
1577 env->fregs[f1 + 2].ll = res.ll.lower;
1578 return set_cc_nz_f128(res.q);
1581 /* 32-bit FP multiplication RR */
1582 void HELPER(meebr)(uint32_t f1, uint32_t f2)
1584 env->fregs[f1].l.upper = float32_mul(env->fregs[f1].l.upper,
1585 env->fregs[f2].l.upper,
1586 &env->fpu_status);
1589 /* 64-bit FP division RR */
1590 void HELPER(ddbr)(uint32_t f1, uint32_t f2)
1592 env->fregs[f1].d = float64_div(env->fregs[f1].d, env->fregs[f2].d,
1593 &env->fpu_status);
1596 /* 64-bit FP multiply and add RM */
1597 void HELPER(madb)(uint32_t f1, uint64_t a2, uint32_t f3)
1599 HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__, f1, a2, f3);
1600 CPU_DoubleU v2;
1601 v2.ll = ldq(a2);
1602 env->fregs[f1].d = float64_add(env->fregs[f1].d,
1603 float64_mul(v2.d, env->fregs[f3].d,
1604 &env->fpu_status),
1605 &env->fpu_status);
1608 /* 64-bit FP multiply and add RR */
1609 void HELPER(madbr)(uint32_t f1, uint32_t f3, uint32_t f2)
1611 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
1612 env->fregs[f1].d = float64_add(float64_mul(env->fregs[f2].d,
1613 env->fregs[f3].d,
1614 &env->fpu_status),
1615 env->fregs[f1].d, &env->fpu_status);
1618 /* 64-bit FP multiply and subtract RR */
1619 void HELPER(msdbr)(uint32_t f1, uint32_t f3, uint32_t f2)
1621 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
1622 env->fregs[f1].d = float64_sub(float64_mul(env->fregs[f2].d,
1623 env->fregs[f3].d,
1624 &env->fpu_status),
1625 env->fregs[f1].d, &env->fpu_status);
1628 /* 32-bit FP multiply and add RR */
1629 void HELPER(maebr)(uint32_t f1, uint32_t f3, uint32_t f2)
1631 env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
1632 float32_mul(env->fregs[f2].l.upper,
1633 env->fregs[f3].l.upper,
1634 &env->fpu_status),
1635 &env->fpu_status);
1638 /* convert 32-bit float to 64-bit float */
1639 void HELPER(ldeb)(uint32_t f1, uint64_t a2)
1641 uint32_t v2;
1642 v2 = ldl(a2);
1643 env->fregs[f1].d = float32_to_float64(v2,
1644 &env->fpu_status);
1647 /* convert 64-bit float to 128-bit float */
1648 void HELPER(lxdb)(uint32_t f1, uint64_t a2)
1650 CPU_DoubleU v2;
1651 v2.ll = ldq(a2);
1652 CPU_QuadU v1;
1653 v1.q = float64_to_float128(v2.d, &env->fpu_status);
1654 env->fregs[f1].ll = v1.ll.upper;
1655 env->fregs[f1 + 2].ll = v1.ll.lower;
1658 /* test data class 32-bit */
1659 uint32_t HELPER(tceb)(uint32_t f1, uint64_t m2)
1661 float32 v1 = env->fregs[f1].l.upper;
1662 int neg = float32_is_neg(v1);
1663 uint32_t cc = 0;
1665 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, (long)v1, m2, neg);
1666 if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
1667 (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
1668 (float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
1669 (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
1670 cc = 1;
1671 } else if (m2 & (1 << (9-neg))) {
1672 /* assume normalized number */
1673 cc = 1;
1676 /* FIXME: denormalized? */
1677 return cc;
1680 /* test data class 64-bit */
1681 uint32_t HELPER(tcdb)(uint32_t f1, uint64_t m2)
1683 float64 v1 = env->fregs[f1].d;
1684 int neg = float64_is_neg(v1);
1685 uint32_t cc = 0;
1687 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, v1, m2, neg);
1688 if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
1689 (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
1690 (float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
1691 (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
1692 cc = 1;
1693 } else if (m2 & (1 << (9-neg))) {
1694 /* assume normalized number */
1695 cc = 1;
1697 /* FIXME: denormalized? */
1698 return cc;
1701 /* test data class 128-bit */
1702 uint32_t HELPER(tcxb)(uint32_t f1, uint64_t m2)
1704 CPU_QuadU v1;
1705 uint32_t cc = 0;
1706 v1.ll.upper = env->fregs[f1].ll;
1707 v1.ll.lower = env->fregs[f1 + 2].ll;
1709 int neg = float128_is_neg(v1.q);
1710 if ((float128_is_zero(v1.q) && (m2 & (1 << (11-neg)))) ||
1711 (float128_is_infinity(v1.q) && (m2 & (1 << (5-neg)))) ||
1712 (float128_is_any_nan(v1.q) && (m2 & (1 << (3-neg)))) ||
1713 (float128_is_signaling_nan(v1.q) && (m2 & (1 << (1-neg))))) {
1714 cc = 1;
1715 } else if (m2 & (1 << (9-neg))) {
1716 /* assume normalized number */
1717 cc = 1;
1719 /* FIXME: denormalized? */
1720 return cc;
1723 /* find leftmost one */
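/* e.g. v2 = 0x0000800000000000 -> r1 = 16 (number of leading zero bits),
   r1 + 1 = v2 with that leftmost one cleared (here 0), cc = 2;
   v2 = 0 -> r1 = 64, r1 + 1 = 0, cc = 0 */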
1724 uint32_t HELPER(flogr)(uint32_t r1, uint64_t v2)
1726 uint64_t res = 0;
1727 uint64_t ov2 = v2;
1729 while (!(v2 & 0x8000000000000000ULL) && v2) {
1730 v2 <<= 1;
1731 res++;
1734 if (!v2) {
1735 env->regs[r1] = 64;
1736 env->regs[r1 + 1] = 0;
1737 return 0;
1738 } else {
1739 env->regs[r1] = res;
1740 env->regs[r1 + 1] = ov2 & ~(0x8000000000000000ULL >> res);
1741 return 2;
1745 /* square root 64-bit RR */
1746 void HELPER(sqdbr)(uint32_t f1, uint32_t f2)
1748 env->fregs[f1].d = float64_sqrt(env->fregs[f2].d, &env->fpu_status);
1751 /* checksum */
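/* CKSM sums the second operand as 32-bit big-endian words (a short final
   word is padded on the right with zeros) into a 64-bit accumulator and
   finally folds the carries back in with (uint32_t)cksm + (cksm >> 32). */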
1752 void HELPER(cksm)(uint32_t r1, uint32_t r2)
1754 uint64_t src = get_address_31fix(r2);
1755 uint64_t src_len = env->regs[(r2 + 1) & 15];
1756 uint64_t cksm = (uint32_t)env->regs[r1];
1758 while (src_len >= 4) {
1759 cksm += ldl(src);
1761 /* move to next word */
1762 src_len -= 4;
1763 src += 4;
1766 switch (src_len) {
1767 case 0:
1768 break;
1769 case 1:
1770 cksm += ldub(src) << 24;
1771 break;
1772 case 2:
1773 cksm += lduw(src) << 16;
1774 break;
1775 case 3:
1776 cksm += lduw(src) << 16;
1777 cksm += ldub(src + 2) << 8;
1778 break;
1781 /* indicate we've processed everything */
1782 env->regs[r2] = src + src_len;
1783 env->regs[(r2 + 1) & 15] = 0;
1785 /* store result */
1786 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1787 ((uint32_t)cksm + (cksm >> 32));
1790 static inline uint32_t cc_calc_ltgt_32(CPUState *env, int32_t src,
1791 int32_t dst)
1793 if (src == dst) {
1794 return 0;
1795 } else if (src < dst) {
1796 return 1;
1797 } else {
1798 return 2;
1802 static inline uint32_t cc_calc_ltgt0_32(CPUState *env, int32_t dst)
1804 return cc_calc_ltgt_32(env, dst, 0);
1807 static inline uint32_t cc_calc_ltgt_64(CPUState *env, int64_t src,
1808 int64_t dst)
1810 if (src == dst) {
1811 return 0;
1812 } else if (src < dst) {
1813 return 1;
1814 } else {
1815 return 2;
1819 static inline uint32_t cc_calc_ltgt0_64(CPUState *env, int64_t dst)
1821 return cc_calc_ltgt_64(env, dst, 0);
1824 static inline uint32_t cc_calc_ltugtu_32(CPUState *env, uint32_t src,
1825 uint32_t dst)
1827 if (src == dst) {
1828 return 0;
1829 } else if (src < dst) {
1830 return 1;
1831 } else {
1832 return 2;
1836 static inline uint32_t cc_calc_ltugtu_64(CPUState *env, uint64_t src,
1837 uint64_t dst)
1839 if (src == dst) {
1840 return 0;
1841 } else if (src < dst) {
1842 return 1;
1843 } else {
1844 return 2;
1848 static inline uint32_t cc_calc_tm_32(CPUState *env, uint32_t val, uint32_t mask)
1850 HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__, val, mask);
1851 uint16_t r = val & mask;
1852 if (r == 0 || mask == 0) {
1853 return 0;
1854 } else if (r == mask) {
1855 return 3;
1856 } else {
1857 return 1;
1861 /* set condition code for test under mask */
1862 static inline uint32_t cc_calc_tm_64(CPUState *env, uint64_t val, uint32_t mask)
1864 uint16_t r = val & mask;
1865 HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__, val, mask, r);
1866 if (r == 0 || mask == 0) {
1867 return 0;
1868 } else if (r == mask) {
1869 return 3;
1870 } else {
1871 while (!(mask & 0x8000)) {
1872 mask <<= 1;
1873 val <<= 1;
1875 if (val & 0x8000) {
1876 return 2;
1877 } else {
1878 return 1;
1883 static inline uint32_t cc_calc_nz(CPUState *env, uint64_t dst)
1885 return !!dst;
1888 static inline uint32_t cc_calc_add_64(CPUState *env, int64_t a1, int64_t a2,
1889 int64_t ar)
1891 if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
1892 return 3; /* overflow */
1893 } else {
1894 if (ar < 0) {
1895 return 1;
1896 } else if (ar > 0) {
1897 return 2;
1898 } else {
1899 return 0;
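/* cc for logical (unsigned) add: 0 = zero, no carry; 1 = not zero, no carry;
   2 = zero, carry; 3 = not zero, carry.  A carry occurred iff the result is
   smaller than one of the operands. */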
1904 static inline uint32_t cc_calc_addu_64(CPUState *env, uint64_t a1, uint64_t a2,
1905 uint64_t ar)
1907 if (ar == 0) {
1908 if (a1) {
1909 return 2;
1910 } else {
1911 return 0;
1913 } else {
1914 if (ar < a1 || ar < a2) {
1915 return 3;
1916 } else {
1917 return 1;
1922 static inline uint32_t cc_calc_sub_64(CPUState *env, int64_t a1, int64_t a2,
1923 int64_t ar)
1925 if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
1926 return 3; /* overflow */
1927 } else {
1928 if (ar < 0) {
1929 return 1;
1930 } else if (ar > 0) {
1931 return 2;
1932 } else {
1933 return 0;
1938 static inline uint32_t cc_calc_subu_64(CPUState *env, uint64_t a1, uint64_t a2,
1939 uint64_t ar)
1941 if (ar == 0) {
1942 return 2;
1943 } else {
1944 if (a2 > a1) {
1945 return 1;
1946 } else {
1947 return 3;
1952 static inline uint32_t cc_calc_abs_64(CPUState *env, int64_t dst)
1954 if ((uint64_t)dst == 0x8000000000000000ULL) {
1955 return 3;
1956 } else if (dst) {
1957 return 1;
1958 } else {
1959 return 0;
1963 static inline uint32_t cc_calc_nabs_64(CPUState *env, int64_t dst)
1965 return !!dst;
1968 static inline uint32_t cc_calc_comp_64(CPUState *env, int64_t dst)
1970 if ((uint64_t)dst == 0x8000000000000000ULL) {
1971 return 3;
1972 } else if (dst < 0) {
1973 return 1;
1974 } else if (dst > 0) {
1975 return 2;
1976 } else {
1977 return 0;
1982 static inline uint32_t cc_calc_add_32(CPUState *env, int32_t a1, int32_t a2,
1983 int32_t ar)
1985 if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
1986 return 3; /* overflow */
1987 } else {
1988 if (ar < 0) {
1989 return 1;
1990 } else if (ar > 0) {
1991 return 2;
1992 } else {
1993 return 0;
1998 static inline uint32_t cc_calc_addu_32(CPUState *env, uint32_t a1, uint32_t a2,
1999 uint32_t ar)
2001 if (ar == 0) {
2002 if (a1) {
2003 return 2;
2004 } else {
2005 return 0;
2007 } else {
2008 if (ar < a1 || ar < a2) {
2009 return 3;
2010 } else {
2011 return 1;
2016 static inline uint32_t cc_calc_sub_32(CPUState *env, int32_t a1, int32_t a2,
2017 int32_t ar)
2019 if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
2020 return 3; /* overflow */
2021 } else {
2022 if (ar < 0) {
2023 return 1;
2024 } else if (ar > 0) {
2025 return 2;
2026 } else {
2027 return 0;
2032 static inline uint32_t cc_calc_subu_32(CPUState *env, uint32_t a1, uint32_t a2,
2033 uint32_t ar)
2035 if (ar == 0) {
2036 return 2;
2037 } else {
2038 if (a2 > a1) {
2039 return 1;
2040 } else {
2041 return 3;
2046 static inline uint32_t cc_calc_abs_32(CPUState *env, int32_t dst)
2048 if ((uint32_t)dst == 0x80000000UL) {
2049 return 3;
2050 } else if (dst) {
2051 return 1;
2052 } else {
2053 return 0;
2057 static inline uint32_t cc_calc_nabs_32(CPUState *env, int32_t dst)
2059 return !!dst;
2062 static inline uint32_t cc_calc_comp_32(CPUState *env, int32_t dst)
2064 if ((uint32_t)dst == 0x80000000UL) {
2065 return 3;
2066 } else if (dst < 0) {
2067 return 1;
2068 } else if (dst > 0) {
2069 return 2;
2070 } else {
2071 return 0;
2075 /* calculate condition code for insert character under mask insn */
2076 static inline uint32_t cc_calc_icm_32(CPUState *env, uint32_t mask, uint32_t val)
2078 HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__, mask, val);
2079 uint32_t cc;
2081 if (mask == 0xf) {
2082 if (!val) {
2083 return 0;
2084 } else if (val & 0x80000000) {
2085 return 1;
2086 } else {
2087 return 2;
2091 if (!val || !mask) {
2092 cc = 0;
2093 } else {
2094 while (mask != 1) {
2095 mask >>= 1;
2096 val >>= 8;
2098 if (val & 0x80) {
2099 cc = 1;
2100 } else {
2101 cc = 2;
2104 return cc;
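/* cc for SHIFT LEFT SINGLE (arithmetic): 3 if any bit shifted out of the
   numeric part differs from the sign bit (overflow), otherwise 0/1/2 for a
   zero/negative/positive result; the sign bit itself is preserved. */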
2107 static inline uint32_t cc_calc_slag(CPUState *env, uint64_t src, uint64_t shift)
2109 uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
2110 uint64_t match, r;
2112 /* check if the sign bit stays the same */
2113 if (src & (1ULL << 63)) {
2114 match = mask;
2115 } else {
2116 match = 0;
2119 if ((src & mask) != match) {
2120 /* overflow */
2121 return 3;
2124 r = ((src << shift) & ((1ULL << 63) - 1)) | (src & (1ULL << 63));
2126 if ((int64_t)r == 0) {
2127 return 0;
2128 } else if ((int64_t)r < 0) {
2129 return 1;
2132 return 2;
2136 static inline uint32_t do_calc_cc(CPUState *env, uint32_t cc_op, uint64_t src,
2137 uint64_t dst, uint64_t vr)
2139 uint32_t r = 0;
2141 switch (cc_op) {
2142 case CC_OP_CONST0:
2143 case CC_OP_CONST1:
2144 case CC_OP_CONST2:
2145 case CC_OP_CONST3:
2146 /* cc_op value _is_ cc */
2147 r = cc_op;
2148 break;
2149 case CC_OP_LTGT0_32:
2150 r = cc_calc_ltgt0_32(env, dst);
2151 break;
2152 case CC_OP_LTGT0_64:
2153 r = cc_calc_ltgt0_64(env, dst);
2154 break;
2155 case CC_OP_LTGT_32:
2156 r = cc_calc_ltgt_32(env, src, dst);
2157 break;
2158 case CC_OP_LTGT_64:
2159 r = cc_calc_ltgt_64(env, src, dst);
2160 break;
2161 case CC_OP_LTUGTU_32:
2162 r = cc_calc_ltugtu_32(env, src, dst);
2163 break;
2164 case CC_OP_LTUGTU_64:
2165 r = cc_calc_ltugtu_64(env, src, dst);
2166 break;
2167 case CC_OP_TM_32:
2168 r = cc_calc_tm_32(env, src, dst);
2169 break;
2170 case CC_OP_TM_64:
2171 r = cc_calc_tm_64(env, src, dst);
2172 break;
2173 case CC_OP_NZ:
2174 r = cc_calc_nz(env, dst);
2175 break;
2176 case CC_OP_ADD_64:
2177 r = cc_calc_add_64(env, src, dst, vr);
2178 break;
2179 case CC_OP_ADDU_64:
2180 r = cc_calc_addu_64(env, src, dst, vr);
2181 break;
2182 case CC_OP_SUB_64:
2183 r = cc_calc_sub_64(env, src, dst, vr);
2184 break;
2185 case CC_OP_SUBU_64:
2186 r = cc_calc_subu_64(env, src, dst, vr);
2187 break;
2188 case CC_OP_ABS_64:
2189 r = cc_calc_abs_64(env, dst);
2190 break;
2191 case CC_OP_NABS_64:
2192 r = cc_calc_nabs_64(env, dst);
2193 break;
2194 case CC_OP_COMP_64:
2195 r = cc_calc_comp_64(env, dst);
2196 break;
2198 case CC_OP_ADD_32:
2199 r = cc_calc_add_32(env, src, dst, vr);
2200 break;
2201 case CC_OP_ADDU_32:
2202 r = cc_calc_addu_32(env, src, dst, vr);
2203 break;
2204 case CC_OP_SUB_32:
2205 r = cc_calc_sub_32(env, src, dst, vr);
2206 break;
2207 case CC_OP_SUBU_32:
2208 r = cc_calc_subu_32(env, src, dst, vr);
2209 break;
2210 case CC_OP_ABS_32:
2211 r = cc_calc_abs_64(env, dst);
2212 break;
2213 case CC_OP_NABS_32:
2214 r = cc_calc_nabs_64(env, dst);
2215 break;
2216 case CC_OP_COMP_32:
2217 r = cc_calc_comp_32(env, dst);
2218 break;
2220 case CC_OP_ICM:
2221 r = cc_calc_icm_32(env, src, dst);
2222 break;
2223 case CC_OP_SLAG:
2224 r = cc_calc_slag(env, src, dst);
2225 break;
2227 case CC_OP_LTGT_F32:
2228 r = set_cc_f32(src, dst);
2229 break;
2230 case CC_OP_LTGT_F64:
2231 r = set_cc_f64(src, dst);
2232 break;
2233 case CC_OP_NZ_F32:
2234 r = set_cc_nz_f32(dst);
2235 break;
2236 case CC_OP_NZ_F64:
2237 r = set_cc_nz_f64(dst);
2238 break;
2240 default:
2241 cpu_abort(env, "Unknown CC operation: %s\n", cc_name(cc_op));
2244 HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__,
2245 cc_name(cc_op), src, dst, vr, r);
2246 return r;
2249 uint32_t calc_cc(CPUState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
2250 uint64_t vr)
2252 return do_calc_cc(env, cc_op, src, dst, vr);
2255 uint32_t HELPER(calc_cc)(uint32_t cc_op, uint64_t src, uint64_t dst,
2256 uint64_t vr)
2258 return do_calc_cc(env, cc_op, src, dst, vr);
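/* convert to decimal: turn a 32-bit binary integer into packed (BCD)
   decimal with a trailing sign nibble, 0xc for plus and 0xd for minus;
   e.g. bin = -123 yields dec = 0x123d */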
2261 uint64_t HELPER(cvd)(int32_t bin)
2263 /* positive 0 */
2264 uint64_t dec = 0x0c;
2265 int shift = 4;
2267 if (bin < 0) {
2268 bin = -bin;
2269 dec = 0x0d;
2272 for (shift = 4; (shift < 64) && bin; shift += 4) {
2273 int current_number = bin % 10;
2275 dec |= (uint64_t)current_number << shift;
2276 bin /= 10;
2279 return dec;
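/* UNPK: expand packed (BCD) digits at src into zoned decimal at dest; each
   digit gets an 0xf zone nibble, except for the last byte whose nibbles are
   only swapped so the sign ends up in the zone position.  len holds both
   length codes (destination in the high nibble, source in the low one),
   each encoded as length minus one. */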
2282 void HELPER(unpk)(uint32_t len, uint64_t dest, uint64_t src)
2284 int len_dest = len >> 4;
2285 int len_src = len & 0xf;
2286 uint8_t b;
2287 int second_nibble = 0;
2289 dest += len_dest;
2290 src += len_src;
2292 /* last byte is special, it only flips the nibbles */
2293 b = ldub(src);
2294 stb(dest, (b << 4) | (b >> 4));
2295 src--;
2296 len_src--;
2298 /* now pad every nibble with 0xf0 */
2300 while (len_dest > 0) {
2301 uint8_t cur_byte = 0;
2303 if (len_src > 0) {
2304 cur_byte = ldub(src);
2307 len_dest--;
2308 dest--;
2310 /* only advance one nibble at a time */
2311 if (second_nibble) {
2312 cur_byte >>= 4;
2313 len_src--;
2314 src--;
2316 second_nibble = !second_nibble;
2318 /* digit */
2319 cur_byte = (cur_byte & 0xf);
2320 /* zone bits */
2321 cur_byte |= 0xf0;
2323 stb(dest, cur_byte);
2327 void HELPER(tr)(uint32_t len, uint64_t array, uint64_t trans)
2329 int i;
2331 for (i = 0; i <= len; i++) {
2332 uint8_t byte = ldub(array + i);
2333 uint8_t new_byte = ldub(trans + byte);
2334 stb(array + i, new_byte);
2338 #ifndef CONFIG_USER_ONLY
2340 void HELPER(load_psw)(uint64_t mask, uint64_t addr)
2342 load_psw(env, mask, addr);
2343 cpu_loop_exit(env);
2346 static void program_interrupt(CPUState *env, uint32_t code, int ilc)
2348 qemu_log("program interrupt at %#" PRIx64 "\n", env->psw.addr);
2350 if (kvm_enabled()) {
2351 #ifdef CONFIG_KVM
2352 kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code);
2353 #endif
2354 } else {
2355 env->int_pgm_code = code;
2356 env->int_pgm_ilc = ilc;
2357 env->exception_index = EXCP_PGM;
2358 cpu_loop_exit(env);
2362 static void ext_interrupt(CPUState *env, int type, uint32_t param,
2363 uint64_t param64)
2365 cpu_inject_ext(env, type, param, param64);
2368 int sclp_service_call(CPUState *env, uint32_t sccb, uint64_t code)
2370 int r = 0;
2371 int shift = 0;
2373 #ifdef DEBUG_HELPER
2374 printf("sclp(0x%x, 0x%" PRIx64 ")\n", sccb, code);
2375 #endif
2377 if (sccb & ~0x7ffffff8ul) {
2378 fprintf(stderr, "KVM: invalid sccb address 0x%x\n", sccb);
2379 r = -1;
2380 goto out;
2383 switch(code) {
2384 case SCLP_CMDW_READ_SCP_INFO:
2385 case SCLP_CMDW_READ_SCP_INFO_FORCED:
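/* report memory as a 16-bit count of storage increments: grow the
   increment size (1 << shift MiB) until the count fits in 16 bits */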
2386 while ((ram_size >> (20 + shift)) > 65535) {
2387 shift++;
2389 stw_phys(sccb + SCP_MEM_CODE, ram_size >> (20 + shift));
2390 stb_phys(sccb + SCP_INCREMENT, 1 << shift);
2391 stw_phys(sccb + SCP_RESPONSE_CODE, 0x10);
2393 if (kvm_enabled()) {
2394 #ifdef CONFIG_KVM
2395 kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE,
2396 sccb & ~3, 0, 1);
2397 #endif
2398 } else {
2399 env->psw.addr += 4;
2400 ext_interrupt(env, EXT_SERVICE, sccb & ~3, 0);
2402 break;
2403 default:
2404 #ifdef DEBUG_HELPER
2405 printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64 "x\n", sccb, code);
2406 #endif
2407 r = -1;
2408 break;
2411 out:
2412 return r;
2415 /* SCLP service call */
2416 uint32_t HELPER(servc)(uint32_t r1, uint64_t r2)
2418 if (sclp_service_call(env, r1, r2)) {
2419 return 3;
2422 return 0;
2425 /* DIAG */
2426 uint64_t HELPER(diag)(uint32_t num, uint64_t mem, uint64_t code)
2428 uint64_t r;
2430 switch (num) {
2431 case 0x500:
2432 /* KVM hypercall */
2433 r = s390_virtio_hypercall(env, mem, code);
2434 break;
2435 case 0x44:
2436 /* yield */
2437 r = 0;
2438 break;
2439 case 0x308:
2440 /* ipl */
2441 r = 0;
2442 break;
2443 default:
2444 r = -1;
2445 break;
2448 if (r) {
2449 program_interrupt(env, PGM_OPERATION, ILC_LATER_INC);
2452 return r;
2455 /* Store CPU ID */
2456 void HELPER(stidp)(uint64_t a1)
2458 stq(a1, env->cpu_num);
2461 /* Set Prefix */
2462 void HELPER(spx)(uint64_t a1)
2464 uint32_t prefix;
2466 prefix = ldl(a1);
2467 env->psa = prefix & 0xfffff000;
2468 qemu_log("prefix: %#x\n", prefix);
2469 tlb_flush_page(env, 0);
2470 tlb_flush_page(env, TARGET_PAGE_SIZE);
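/* The prefix register relocates the 8k prefix area, i.e. absolute pages 0
   and 1, which is why exactly those two TLB pages are flushed here. */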
2473 /* Set Clock */
2474 uint32_t HELPER(sck)(uint64_t a1)
2476 /* XXX not implemented - is it necessary? */
2478 return 0;
2481 static inline uint64_t clock_value(CPUState *env)
2483 uint64_t time;
2485 time = env->tod_offset +
2486 time2tod(qemu_get_clock_ns(vm_clock) - env->tod_basetime);
2488 return time;
2491 /* Store Clock */
2492 uint32_t HELPER(stck)(uint64_t a1)
2494 stq(a1, clock_value(env));
2496 return 0;
2499 /* Store Clock Extended */
2500 uint32_t HELPER(stcke)(uint64_t a1)
2502 stb(a1, 0);
2503 /* basically the same value as stck */
2504 stq(a1 + 1, clock_value(env) | env->cpu_num);
2505 /* more fine grained than stck */
2506 stq(a1 + 9, 0);
2507 /* XXX programmable fields */
2508 stw(a1 + 17, 0);
2511 return 0;
2514 /* Set Clock Comparator */
2515 void HELPER(sckc)(uint64_t a1)
2517 uint64_t time = ldq(a1);
2519 if (time == -1ULL) {
2520 return;
2523 /* difference between now and then */
2524 time -= clock_value(env);
2525 /* nanoseconds */
2526 time = (time * 125) >> 9;
2528 qemu_mod_timer(env->tod_timer, qemu_get_clock_ns(vm_clock) + time);
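/* One TOD-clock unit (bit 63) is 1/4096 microsecond, since bit 51 of the
   TOD clock ticks once per microsecond.  Converting a TOD delta to
   nanoseconds therefore multiplies by 1000/4096 = 125/512, which is the
   (time * 125) >> 9 above; SPT below uses the same conversion. */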
2531 /* Store Clock Comparator */
2532 void HELPER(stckc)(uint64_t a1)
2534 /* XXX implement */
2535 stq(a1, 0);
2538 /* Set CPU Timer */
2539 void HELPER(spt)(uint64_t a1)
2541 uint64_t time = ldq(a1);
2543 if (time == -1ULL) {
2544 return;
2547 /* nanoseconds */
2548 time = (time * 125) >> 9;
2550 qemu_mod_timer(env->cpu_timer, qemu_get_clock_ns(vm_clock) + time);
2553 /* Store CPU Timer */
2554 void HELPER(stpt)(uint64_t a1)
2556 /* XXX implement */
2557 stq(a1, 0);
2560 /* Store System Information */
2561 uint32_t HELPER(stsi)(uint64_t a0, uint32_t r0, uint32_t r1)
2563 int cc = 0;
2564 int sel1, sel2;
2566 if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
2567 ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
2568 /* valid function code, invalid reserved bits */
2569 program_interrupt(env, PGM_SPECIFICATION, 2);
2572 sel1 = r0 & STSI_R0_SEL1_MASK;
2573 sel2 = r1 & STSI_R1_SEL2_MASK;
2575 /* XXX: spec exception if sysib is not 4k-aligned */
2577 switch (r0 & STSI_LEVEL_MASK) {
2578 case STSI_LEVEL_1:
2579 if ((sel1 == 1) && (sel2 == 1)) {
2580 /* Basic Machine Configuration */
2581 struct sysib_111 sysib;
2583 memset(&sysib, 0, sizeof(sysib));
2584 ebcdic_put(sysib.manuf, "QEMU ", 16);
2585 /* same as machine type number in STORE CPU ID */
2586 ebcdic_put(sysib.type, "QEMU", 4);
2587 /* same as model number in STORE CPU ID */
2588 ebcdic_put(sysib.model, "QEMU ", 16);
2589 ebcdic_put(sysib.sequence, "QEMU ", 16);
2590 ebcdic_put(sysib.plant, "QEMU", 4);
2591 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2592 } else if ((sel1 == 2) && (sel2 == 1)) {
2593 /* Basic Machine CPU */
2594 struct sysib_121 sysib;
2596 memset(&sysib, 0, sizeof(sysib));
2597 /* XXX make different for different CPUs? */
2598 ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
2599 ebcdic_put(sysib.plant, "QEMU", 4);
2600 stw_p(&sysib.cpu_addr, env->cpu_num);
2601 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2602 } else if ((sel1 == 2) && (sel2 == 2)) {
2603 /* Basic Machine CPUs */
2604 struct sysib_122 sysib;
2606 memset(&sysib, 0, sizeof(sysib));
2607 stl_p(&sysib.capability, 0x443afc29);
2608 /* XXX change when SMP comes */
2609 stw_p(&sysib.total_cpus, 1);
2610 stw_p(&sysib.active_cpus, 1);
2611 stw_p(&sysib.standby_cpus, 0);
2612 stw_p(&sysib.reserved_cpus, 0);
2613 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2614 } else {
2615 cc = 3;
2617 break;
2618 case STSI_LEVEL_2:
2620 if ((sel1 == 2) && (sel2 == 1)) {
2621 /* LPAR CPU */
2622 struct sysib_221 sysib;
2624 memset(&sysib, 0, sizeof(sysib));
2625 /* XXX make different for different CPUs? */
2626 ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
2627 ebcdic_put(sysib.plant, "QEMU", 4);
2628 stw_p(&sysib.cpu_addr, env->cpu_num);
2629 stw_p(&sysib.cpu_id, 0);
2630 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2631 } else if ((sel1 == 2) && (sel2 == 2)) {
2632 /* LPAR CPUs */
2633 struct sysib_222 sysib;
2635 memset(&sysib, 0, sizeof(sysib));
2636 stw_p(&sysib.lpar_num, 0);
2637 sysib.lcpuc = 0;
2638 /* XXX change when SMP comes */
2639 stw_p(&sysib.total_cpus, 1);
2640 stw_p(&sysib.conf_cpus, 1);
2641 stw_p(&sysib.standby_cpus, 0);
2642 stw_p(&sysib.reserved_cpus, 0);
2643 ebcdic_put(sysib.name, "QEMU ", 8);
2644 stl_p(&sysib.caf, 1000);
2645 stw_p(&sysib.dedicated_cpus, 0);
2646 stw_p(&sysib.shared_cpus, 0);
2647 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2648 } else {
2649 cc = 3;
2651 break;
2653 case STSI_LEVEL_3:
2655 if ((sel1 == 2) && (sel2 == 2)) {
2656 /* VM CPUs */
2657 struct sysib_322 sysib;
2659 memset(&sysib, 0, sizeof(sysib));
2660 sysib.count = 1;
2661 /* XXX change when SMP comes */
2662 stw_p(&sysib.vm[0].total_cpus, 1);
2663 stw_p(&sysib.vm[0].conf_cpus, 1);
2664 stw_p(&sysib.vm[0].standby_cpus, 0);
2665 stw_p(&sysib.vm[0].reserved_cpus, 0);
2666 ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
2667 stl_p(&sysib.vm[0].caf, 1000);
2668 ebcdic_put(sysib.vm[0].cpi, "KVM/Linux ", 16);
2669 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2670 } else {
2671 cc = 3;
2673 break;
2675 case STSI_LEVEL_CURRENT:
2676 env->regs[0] = STSI_LEVEL_3;
2677 break;
2678 default:
2679 cc = 3;
2680 break;
2683 return cc;
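/* STSI selects a SYSIB by function code (level) and the two selectors,
   e.g. 1.1.1 is the basic-machine configuration block and 3.2.2 the VM
   CPUs block filled in above; unimplemented combinations return cc 3.
   STSI_LEVEL_CURRENT stores no SYSIB and only reports the current level
   (here level 3) in register 0. */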
2686 void HELPER(lctlg)(uint32_t r1, uint64_t a2, uint32_t r3)
2688 int i;
2689 uint64_t src = a2;
2691 for (i = r1;; i = (i + 1) % 16) {
2692 env->cregs[i] = ldq(src);
2693 HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
2694 i, src, env->cregs[i]);
2695 src += sizeof(uint64_t);
2697 if (i == r3) {
2698 break;
2702 tlb_flush(env, 1);
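/* The loop wraps modulo 16, matching the instruction's register-range
   semantics: r1 = 13, r3 = 2 loads control registers 13, 14, 15, 0, 1 and
   2 from consecutive doublewords.  The TLB is flushed because control
   registers carry translation state such as the primary ASCE in CR1.  The
   same wrap-around applies to LCTL, STCTG and STCTL below. */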
2705 void HELPER(lctl)(uint32_t r1, uint64_t a2, uint32_t r3)
2707 int i;
2708 uint64_t src = a2;
2710 for (i = r1;; i = (i + 1) % 16) {
2711 env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | ldl(src);
2712 src += sizeof(uint32_t);
2714 if (i == r3) {
2715 break;
2719 tlb_flush(env, 1);
2722 void HELPER(stctg)(uint32_t r1, uint64_t a2, uint32_t r3)
2724 int i;
2725 uint64_t dest = a2;
2727 for (i = r1;; i = (i + 1) % 16) {
2728 stq(dest, env->cregs[i]);
2729 dest += sizeof(uint64_t);
2731 if (i == r3) {
2732 break;
2737 void HELPER(stctl)(uint32_t r1, uint64_t a2, uint32_t r3)
2739 int i;
2740 uint64_t dest = a2;
2742 for (i = r1;; i = (i + 1) % 16) {
2743 stl(dest, env->cregs[i]);
2744 dest += sizeof(uint32_t);
2746 if (i == r3) {
2747 break;
2752 uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
2754 /* XXX implement */
2756 return 0;
2759 /* insert storage key extended */
2760 uint64_t HELPER(iske)(uint64_t r2)
2762 uint64_t addr = get_address(0, 0, r2);
2764 if (addr >= ram_size) {
2765 return 0;
2768 return env->storage_keys[addr / TARGET_PAGE_SIZE];
2771 /* set storage key extended */
2772 void HELPER(sske)(uint32_t r1, uint64_t r2)
2774 uint64_t addr = get_address(0, 0, r2);
2776 if (addr >= ram_size) {
2777 return;
2780 env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
2783 /* reset reference bit extended */
2784 uint32_t HELPER(rrbe)(uint32_t r1, uint64_t r2)
2786 uint8_t re;
2787 uint8_t key;
2788 if (r2 >= ram_size) {
2789 return 0;
2792 key = env->storage_keys[r2 / TARGET_PAGE_SIZE];
2793 re = key & (SK_R | SK_C);
2794 env->storage_keys[r2 / TARGET_PAGE_SIZE] = (key & ~SK_R);
2797 * cc
2799 * 0 Reference bit zero; change bit zero
2800 * 1 Reference bit zero; change bit one
2801 * 2 Reference bit one; change bit zero
2802 * 3 Reference bit one; change bit one
2805 return re >> 1;
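/* re >> 1 yields exactly the encoding in the table above because SK_R and
   SK_C occupy adjacent bit positions (0x04 and 0x02) in the storage key
   byte. */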
2808 /* compare and swap and purge */
2809 uint32_t HELPER(csp)(uint32_t r1, uint32_t r2)
2811 uint32_t cc;
2812 uint32_t o1 = env->regs[r1];
2813 uint64_t a2 = get_address_31fix(r2) & ~3ULL;
2814 uint32_t o2 = ldl(a2);
2816 if (o1 == o2) {
2817 stl(a2, env->regs[(r1 + 1) & 15]);
2818 if (env->regs[r2] & 0x3) {
2819 /* flush TLB / ALB */
2820 tlb_flush(env, 1);
2822 cc = 0;
2823 } else {
2824 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
2825 cc = 1;
2828 return cc;
2831 static uint32_t mvc_asc(int64_t l, uint64_t a1, uint64_t mode1, uint64_t a2,
2832 uint64_t mode2)
2834 target_ulong src, dest;
2835 int flags, cc = 0, i;
2837 if (!l) {
2838 return 0;
2839 } else if (l > 256) {
2840 /* max 256 */
2841 l = 256;
2842 cc = 3;
2845 if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
2846 cpu_loop_exit(env);
2848 dest |= a1 & ~TARGET_PAGE_MASK;
2850 if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
2851 cpu_loop_exit(env);
2853 src |= a2 & ~TARGET_PAGE_MASK;
2855 /* XXX replace w/ memcpy */
2856 for (i = 0; i < l; i++) {
2857 /* XXX be more clever */
2858 if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
2859 (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
2860 mvc_asc(l - i, a1 + i, mode1, a2 + i, mode2);
2861 break;
2863 stb_phys(dest + i, ldub_phys(src + i));
2866 return cc;
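/* Both operands are translated once per call; when the copy would cross a
   page boundary in either address space, the helper recurses for the
   remainder, so each invocation only touches a single source and
   destination page.  cc 3 reports that the requested length exceeded 256
   bytes and only 256 were moved, as MVCP/MVCS define. */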
2869 uint32_t HELPER(mvcs)(uint64_t l, uint64_t a1, uint64_t a2)
2871 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2872 __FUNCTION__, l, a1, a2);
2874 return mvc_asc(l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
2877 uint32_t HELPER(mvcp)(uint64_t l, uint64_t a1, uint64_t a2)
2879 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2880 __FUNCTION__, l, a1, a2);
2882 return mvc_asc(l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
2885 uint32_t HELPER(sigp)(uint64_t order_code, uint32_t r1, uint64_t cpu_addr)
2887 int cc = 0;
2889 HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
2890 __FUNCTION__, order_code, r1, cpu_addr);
2892 /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
2893 as parameter (input). Status (output) is always R1. */
2895 switch (order_code) {
2896 case SIGP_SET_ARCH:
2897 /* switch arch */
2898 break;
2899 case SIGP_SENSE:
2900 /* enumerate CPU status */
2901 if (cpu_addr) {
2902 /* XXX implement when SMP comes */
2903 return 3;
2905 env->regs[r1] &= 0xffffffff00000000ULL;
2906 cc = 1;
2907 break;
2908 #if !defined (CONFIG_USER_ONLY)
2909 case SIGP_RESTART:
2910 qemu_system_reset_request();
2911 cpu_loop_exit(env);
2912 break;
2913 case SIGP_STOP:
2914 qemu_system_shutdown_request();
2915 cpu_loop_exit(env);
2916 break;
2917 #endif
2918 default:
2919 /* unknown sigp */
2920 fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
2921 cc = 3;
2924 return cc;
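/* SIGP condition codes: 0 order accepted, 1 status stored (as for the
   SENSE of the own CPU above), 2 busy, 3 target CPU not operational. */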
2927 void HELPER(sacf)(uint64_t a1)
2929 HELPER_LOG("%s: %16" PRIx64 "\n", __FUNCTION__, a1);
2931 switch (a1 & 0xf00) {
2932 case 0x000:
2933 env->psw.mask &= ~PSW_MASK_ASC;
2934 env->psw.mask |= PSW_ASC_PRIMARY;
2935 break;
2936 case 0x100:
2937 env->psw.mask &= ~PSW_MASK_ASC;
2938 env->psw.mask |= PSW_ASC_SECONDARY;
2939 break;
2940 case 0x300:
2941 env->psw.mask &= ~PSW_MASK_ASC;
2942 env->psw.mask |= PSW_ASC_HOME;
2943 break;
2944 default:
2945 qemu_log("unknown sacf mode: %" PRIx64 "\n", a1);
2946 program_interrupt(env, PGM_SPECIFICATION, 2);
2947 break;
2951 /* invalidate pte */
2952 void HELPER(ipte)(uint64_t pte_addr, uint64_t vaddr)
2954 uint64_t page = vaddr & TARGET_PAGE_MASK;
2955 uint64_t pte = 0;
2957 /* XXX broadcast to other CPUs */
2959 /* XXX Linux is nice enough to give us the exact pte address.
2960 According to spec we'd have to find it out ourselves */
2961 /* XXX Linux is fine with overwriting the pte, the spec requires
2962 us to only set the invalid bit */
2963 stq_phys(pte_addr, pte | _PAGE_INVALID);
2965 /* XXX we exploit the fact that Linux passes the exact virtual
2966 address here - it's not obliged to! */
2967 tlb_flush_page(env, page);
2969 /* XXX 31-bit hack */
2970 if (page & 0x80000000) {
2971 tlb_flush_page(env, page & ~0x80000000);
2972 } else {
2973 tlb_flush_page(env, page | 0x80000000);
2977 /* flush local tlb */
2978 void HELPER(ptlb)(void)
2980 tlb_flush(env, 1);
2983 /* store using real address */
2984 void HELPER(stura)(uint64_t addr, uint32_t v1)
2986 stl_phys(get_address(0, 0, addr), v1); /* STURA stores a 32-bit word */
2989 /* load real address */
2990 uint32_t HELPER(lra)(uint64_t addr, uint32_t r1)
2992 uint32_t cc = 0;
2993 int old_exc = env->exception_index;
2994 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
2995 uint64_t ret;
2996 int flags;
2998 /* XXX incomplete - has more corner cases */
2999 if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
3000 program_interrupt(env, PGM_SPECIAL_OP, 2);
3003 env->exception_index = old_exc;
3004 if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
3005 cc = 3;
3007 if (env->exception_index == EXCP_PGM) {
3008 ret = env->int_pgm_code | 0x80000000;
3009 } else {
3010 ret |= addr & ~TARGET_PAGE_MASK;
3012 env->exception_index = old_exc;
3014 if (!(env->psw.mask & PSW_MASK_64)) {
3015 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (ret & 0xffffffffULL);
3016 } else {
3017 env->regs[r1] = ret;
3020 return cc;
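/* LRA result convention: on success r1 receives the translated real
   address including the byte offset within the page and cc stays 0; if
   translation raises a program exception, the exception code is placed in
   r1 with bit 0x80000000 set and cc is 3. */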
3023 #endif