target-s390x/op_helper.c (qemu/ar7.git)
1 /*
2 * S/390 helper routines
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "cpu.h"
22 #include "dyngen-exec.h"
23 #include "host-utils.h"
24 #include "helpers.h"
25 #include <string.h>
26 #include "kvm.h"
27 #include "qemu-timer.h"
28 #ifdef CONFIG_KVM
29 #include <linux/kvm.h>
30 #endif
32 /*****************************************************************************/
33 /* Softmmu support */
34 #if !defined (CONFIG_USER_ONLY)
35 #include "softmmu_exec.h"
37 #define MMUSUFFIX _mmu
39 #define SHIFT 0
40 #include "softmmu_template.h"
42 #define SHIFT 1
43 #include "softmmu_template.h"
45 #define SHIFT 2
46 #include "softmmu_template.h"
48 #define SHIFT 3
49 #include "softmmu_template.h"
51 /* try to fill the TLB and return an exception if error. If retaddr is
52 NULL, it means that the function was called in C code (i.e. not
53 from generated code or from helper.c) */
54 /* XXX: fix it to restore all registers */
55 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
57 TranslationBlock *tb;
58 CPUState *saved_env;
59 unsigned long pc;
60 int ret;
62 /* XXX: hack to restore env in all cases, even if not called from
63 generated code */
64 saved_env = env;
65 env = cpu_single_env;
66 ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
67 if (unlikely(ret != 0)) {
68 if (likely(retaddr)) {
69 /* now we have a real cpu fault */
70 pc = (unsigned long)retaddr;
71 tb = tb_find_pc(pc);
72 if (likely(tb)) {
73 /* the PC is inside the translated code. It means that we have
74 a virtual CPU fault */
75 cpu_restore_state(tb, env, pc);
78 cpu_loop_exit(env);
80 env = saved_env;
83 #endif
85 /* #define DEBUG_HELPER */
86 #ifdef DEBUG_HELPER
87 #define HELPER_LOG(x...) qemu_log(x)
88 #else
89 #define HELPER_LOG(x...)
90 #endif
92 /* raise an exception */
93 void HELPER(exception)(uint32_t excp)
95 HELPER_LOG("%s: exception %d\n", __FUNCTION__, excp);
96 env->exception_index = excp;
97 cpu_loop_exit(env);
100 #ifndef CONFIG_USER_ONLY
101 static void mvc_fast_memset(CPUState *env, uint32_t l, uint64_t dest,
102 uint8_t byte)
104 target_phys_addr_t dest_phys;
105 target_phys_addr_t len = l;
106 void *dest_p;
107 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
108 int flags;
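    /* mmu_translate() either yields the physical address or fails; in the
     * failure case the stb() below redoes the access through the slow path
     * and raises the proper exception, so cpu_abort() should be unreachable. */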
110 if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
111 stb(dest, byte);
112 cpu_abort(env, "should never reach here");
114 dest_phys |= dest & ~TARGET_PAGE_MASK;
116 dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
118 memset(dest_p, byte, len);
120 cpu_physical_memory_unmap(dest_p, 1, len, len);
123 static void mvc_fast_memmove(CPUState *env, uint32_t l, uint64_t dest,
124 uint64_t src)
126 target_phys_addr_t dest_phys;
127 target_phys_addr_t src_phys;
128 target_phys_addr_t len = l;
129 void *dest_p;
130 void *src_p;
131 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
132 int flags;
134 if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
135 stb(dest, 0);
136 cpu_abort(env, "should never reach here");
138 dest_phys |= dest & ~TARGET_PAGE_MASK;
140 if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
141 ldub(src);
142 cpu_abort(env, "should never reach here");
144 src_phys |= src & ~TARGET_PAGE_MASK;
146 dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
147 src_p = cpu_physical_memory_map(src_phys, &len, 0);
149 memmove(dest_p, src_p, len);
151 cpu_physical_memory_unmap(dest_p, 1, len, len);
152 cpu_physical_memory_unmap(src_p, 0, len, len);
154 #endif
156 /* and on array */
157 uint32_t HELPER(nc)(uint32_t l, uint64_t dest, uint64_t src)
159 int i;
160 unsigned char x;
161 uint32_t cc = 0;
163 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
164 __FUNCTION__, l, dest, src);
165 for (i = 0; i <= l; i++) {
166 x = ldub(dest + i) & ldub(src + i);
167 if (x) {
168 cc = 1;
170 stb(dest + i, x);
172 return cc;
175 /* xor on array */
176 uint32_t HELPER(xc)(uint32_t l, uint64_t dest, uint64_t src)
178 int i;
179 unsigned char x;
180 uint32_t cc = 0;
182 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
183 __FUNCTION__, l, dest, src);
185 #ifndef CONFIG_USER_ONLY
186 /* xor with itself is the same as memset(0) */
187 if ((l > 32) && (src == dest) &&
188 (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
189 mvc_fast_memset(env, l + 1, dest, 0);
190 return 0;
192 #else
193 if (src == dest) {
194 memset(g2h(dest), 0, l + 1);
195 return 0;
197 #endif
199 for (i = 0; i <= l; i++) {
200 x = ldub(dest + i) ^ ldub(src + i);
201 if (x) {
202 cc = 1;
204 stb(dest + i, x);
206 return cc;
209 /* or on array */
210 uint32_t HELPER(oc)(uint32_t l, uint64_t dest, uint64_t src)
212 int i;
213 unsigned char x;
214 uint32_t cc = 0;
216 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
217 __FUNCTION__, l, dest, src);
218 for (i = 0; i <= l; i++) {
219 x = ldub(dest + i) | ldub(src + i);
220 if (x) {
221 cc = 1;
223 stb(dest + i, x);
225 return cc;
228 /* memmove */
229 void HELPER(mvc)(uint32_t l, uint64_t dest, uint64_t src)
231 int i = 0;
232 int x = 0;
233 uint32_t l_64 = (l + 1) / 8;
235 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
236 __FUNCTION__, l, dest, src);
238 #ifndef CONFIG_USER_ONLY
239 if ((l > 32) &&
240 (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
241 (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
242 if (dest == (src + 1)) {
243 mvc_fast_memset(env, l + 1, dest, ldub(src));
244 return;
245 } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
246 mvc_fast_memmove(env, l + 1, dest, src);
247 return;
250 #else
251 if (dest == (src + 1)) {
252 memset(g2h(dest), ldub(src), l + 1);
253 return;
254 } else {
255 memmove(g2h(dest), g2h(src), l + 1);
256 return;
258 #endif
260 /* handle the parts that fit into 8-byte loads/stores */
261 if (dest != (src + 1)) {
262 for (i = 0; i < l_64; i++) {
263 stq(dest + x, ldq(src + x));
264 x += 8;
268 /* slow version crossing pages with byte accesses */
269 for (i = x; i <= l; i++) {
270 stb(dest + i, ldub(src + i));
274 /* compare unsigned byte arrays */
275 uint32_t HELPER(clc)(uint32_t l, uint64_t s1, uint64_t s2)
277 int i;
278     unsigned char x, y;
279 uint32_t cc;
280 HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
281 __FUNCTION__, l, s1, s2);
282 for (i = 0; i <= l; i++) {
283 x = ldub(s1 + i);
284 y = ldub(s2 + i);
285 HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
286 if (x < y) {
287 cc = 1;
288 goto done;
289 } else if (x > y) {
290 cc = 2;
291 goto done;
294 cc = 0;
295 done:
296 HELPER_LOG("\n");
297 return cc;
300 /* compare logical under mask */
301 uint32_t HELPER(clm)(uint32_t r1, uint32_t mask, uint64_t addr)
303     uint8_t r, d;
304 uint32_t cc;
305 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __FUNCTION__, r1,
306 mask, addr);
307 cc = 0;
308 while (mask) {
309 if (mask & 8) {
310 d = ldub(addr);
311 r = (r1 & 0xff000000UL) >> 24;
312 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
313 addr);
314 if (r < d) {
315 cc = 1;
316 break;
317 } else if (r > d) {
318 cc = 2;
319 break;
321 addr++;
323 mask = (mask << 1) & 0xf;
324 r1 <<= 8;
326 HELPER_LOG("\n");
327 return cc;
330 /* store character under mask */
331 void HELPER(stcm)(uint32_t r1, uint32_t mask, uint64_t addr)
333 uint8_t r;
334 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__, r1, mask,
335 addr);
336 while (mask) {
337 if (mask & 8) {
338 r = (r1 & 0xff000000UL) >> 24;
339 stb(addr, r);
340 HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask, r, addr);
341 addr++;
343 mask = (mask << 1) & 0xf;
344 r1 <<= 8;
346 HELPER_LOG("\n");
349 /* 64/64 -> 128 unsigned multiplication */
350 void HELPER(mlg)(uint32_t r1, uint64_t v2)
352 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
353 /* assuming 64-bit hosts have __uint128_t */
354 __uint128_t res = (__uint128_t)env->regs[r1 + 1];
355 res *= (__uint128_t)v2;
356 env->regs[r1] = (uint64_t)(res >> 64);
357 env->regs[r1 + 1] = (uint64_t)res;
358 #else
359 mulu64(&env->regs[r1 + 1], &env->regs[r1], env->regs[r1 + 1], v2);
360 #endif
363 /* 128 -> 64/64 unsigned division */
364 void HELPER(dlg)(uint32_t r1, uint64_t v2)
366 uint64_t divisor = v2;
368 if (!env->regs[r1]) {
369 /* 64 -> 64/64 case */
370 env->regs[r1] = env->regs[r1+1] % divisor;
371 env->regs[r1+1] = env->regs[r1+1] / divisor;
372 return;
373 } else {
375 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
376 /* assuming 64-bit hosts have __uint128_t */
377 __uint128_t dividend = (((__uint128_t)env->regs[r1]) << 64) |
378 (env->regs[r1+1]);
379 __uint128_t quotient = dividend / divisor;
380 env->regs[r1+1] = quotient;
381 __uint128_t remainder = dividend % divisor;
382 env->regs[r1] = remainder;
383 #else
384 /* 32-bit hosts would need special wrapper functionality - just abort if
385        we encounter such a case; it's very unlikely anyway. */
386 cpu_abort(env, "128 -> 64/64 division not implemented\n");
387 #endif
391 static inline uint64_t get_address(int x2, int b2, int d2)
393 uint64_t r = d2;
395 if (x2) {
396 r += env->regs[x2];
399 if (b2) {
400 r += env->regs[b2];
403 /* 31-Bit mode */
404 if (!(env->psw.mask & PSW_MASK_64)) {
405 r &= 0x7fffffff;
408 return r;
411 static inline uint64_t get_address_31fix(int reg)
413 uint64_t r = env->regs[reg];
415 /* 31-Bit mode */
416 if (!(env->psw.mask & PSW_MASK_64)) {
417 r &= 0x7fffffff;
420 return r;
423 /* search string (c is byte to search, r2 is string, r1 end of string) */
424 uint32_t HELPER(srst)(uint32_t c, uint32_t r1, uint32_t r2)
426 uint64_t i;
427 uint32_t cc = 2;
428 uint64_t str = get_address_31fix(r2);
429 uint64_t end = get_address_31fix(r1);
431 HELPER_LOG("%s: c %d *r1 0x%" PRIx64 " *r2 0x%" PRIx64 "\n", __FUNCTION__,
432 c, env->regs[r1], env->regs[r2]);
434 for (i = str; i != end; i++) {
435 if (ldub(i) == c) {
436 env->regs[r1] = i;
437 cc = 1;
438 break;
442 return cc;
445 /* unsigned string compare (c is string terminator) */
446 uint32_t HELPER(clst)(uint32_t c, uint32_t r1, uint32_t r2)
448 uint64_t s1 = get_address_31fix(r1);
449 uint64_t s2 = get_address_31fix(r2);
450 uint8_t v1, v2;
451 uint32_t cc;
452 c = c & 0xff;
453 #ifdef CONFIG_USER_ONLY
454 if (!c) {
455 HELPER_LOG("%s: comparing '%s' and '%s'\n",
456 __FUNCTION__, (char*)g2h(s1), (char*)g2h(s2));
458 #endif
459 for (;;) {
460 v1 = ldub(s1);
461 v2 = ldub(s2);
462 if ((v1 == c || v2 == c) || (v1 != v2)) {
463 break;
465 s1++;
466 s2++;
469 if (v1 == v2) {
470 cc = 0;
471 } else {
472 cc = (v1 < v2) ? 1 : 2;
473 /* FIXME: 31-bit mode! */
474 env->regs[r1] = s1;
475 env->regs[r2] = s2;
477 return cc;
480 /* move page */
481 void HELPER(mvpg)(uint64_t r0, uint64_t r1, uint64_t r2)
483 /* XXX missing r0 handling */
484 #ifdef CONFIG_USER_ONLY
485 int i;
487 for (i = 0; i < TARGET_PAGE_SIZE; i++) {
488 stb(r1 + i, ldub(r2 + i));
490 #else
491 mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
492 #endif
495 /* string copy (c is string terminator) */
496 void HELPER(mvst)(uint32_t c, uint32_t r1, uint32_t r2)
498 uint64_t dest = get_address_31fix(r1);
499 uint64_t src = get_address_31fix(r2);
500 uint8_t v;
501 c = c & 0xff;
502 #ifdef CONFIG_USER_ONLY
503 if (!c) {
504 HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__, (char*)g2h(src),
505 dest);
507 #endif
508 for (;;) {
509 v = ldub(src);
510 stb(dest, v);
511 if (v == c) {
512 break;
514 src++;
515 dest++;
517 env->regs[r1] = dest; /* FIXME: 31-bit mode! */
520 /* compare and swap 64-bit */
521 uint32_t HELPER(csg)(uint32_t r1, uint64_t a2, uint32_t r3)
523 /* FIXME: locking? */
524 uint32_t cc;
525 uint64_t v2 = ldq(a2);
526 if (env->regs[r1] == v2) {
527 cc = 0;
528 stq(a2, env->regs[r3]);
529 } else {
530 cc = 1;
531 env->regs[r1] = v2;
533 return cc;
536 /* compare double and swap 64-bit */
537 uint32_t HELPER(cdsg)(uint32_t r1, uint64_t a2, uint32_t r3)
539 /* FIXME: locking? */
540 uint32_t cc;
541 uint64_t v2_hi = ldq(a2);
542 uint64_t v2_lo = ldq(a2 + 8);
543 uint64_t v1_hi = env->regs[r1];
544 uint64_t v1_lo = env->regs[r1 + 1];
546 if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) {
547 cc = 0;
548 stq(a2, env->regs[r3]);
549 stq(a2 + 8, env->regs[r3 + 1]);
550 } else {
551 cc = 1;
552 env->regs[r1] = v2_hi;
553 env->regs[r1 + 1] = v2_lo;
556 return cc;
559 /* compare and swap 32-bit */
560 uint32_t HELPER(cs)(uint32_t r1, uint64_t a2, uint32_t r3)
562 /* FIXME: locking? */
563 uint32_t cc;
564 HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__, r1, a2, r3);
565 uint32_t v2 = ldl(a2);
566 if (((uint32_t)env->regs[r1]) == v2) {
567 cc = 0;
568 stl(a2, (uint32_t)env->regs[r3]);
569 } else {
570 cc = 1;
571 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | v2;
573 return cc;
576 static uint32_t helper_icm(uint32_t r1, uint64_t address, uint32_t mask)
578 int pos = 24; /* top of the lower half of r1 */
579 uint64_t rmask = 0xff000000ULL;
580 uint8_t val = 0;
581 int ccd = 0;
582 uint32_t cc = 0;
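    /* Walk the 4-bit mask from its leftmost bit; every selected byte is loaded
       and inserted at the matching byte of the low word of r1.  cc becomes 1
       if the first inserted byte has its top bit set, 2 if any inserted byte
       is nonzero while cc is still 0, and stays 0 otherwise. */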
584 while (mask) {
585 if (mask & 8) {
586 env->regs[r1] &= ~rmask;
587 val = ldub(address);
588 if ((val & 0x80) && !ccd) {
589 cc = 1;
591 ccd = 1;
592 if (val && cc == 0) {
593 cc = 2;
595 env->regs[r1] |= (uint64_t)val << pos;
596 address++;
598 mask = (mask << 1) & 0xf;
599 pos -= 8;
600 rmask >>= 8;
603 return cc;
606 /* execute instruction
607 this instruction executes an insn modified with the contents of r1
608 it does not change the executed instruction in memory
609 it does not change the program counter
610 in other words: tricky...
611 currently implemented by interpreting the cases it is most commonly used in
613 uint32_t HELPER(ex)(uint32_t cc, uint64_t v1, uint64_t addr, uint64_t ret)
615 uint16_t insn = lduw_code(addr);
616 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__, v1, addr,
617 insn);
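    /* SS-format targets with a zero length byte: 0xd2 = MVC, 0xd5 = CLC,
       0xd7 = XC; the length is taken from r1 instead. */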
618 if ((insn & 0xf0ff) == 0xd000) {
619 uint32_t l, insn2, b1, b2, d1, d2;
620 l = v1 & 0xff;
621 insn2 = ldl_code(addr + 2);
622 b1 = (insn2 >> 28) & 0xf;
623 b2 = (insn2 >> 12) & 0xf;
624 d1 = (insn2 >> 16) & 0xfff;
625 d2 = insn2 & 0xfff;
626 switch (insn & 0xf00) {
627 case 0x200:
628 helper_mvc(l, get_address(0, b1, d1), get_address(0, b2, d2));
629 break;
630 case 0x500:
631 cc = helper_clc(l, get_address(0, b1, d1), get_address(0, b2, d2));
632 break;
633 case 0x700:
634 cc = helper_xc(l, get_address(0, b1, d1), get_address(0, b2, d2));
635 break;
636 default:
637 goto abort;
638 break;
640 } else if ((insn & 0xff00) == 0x0a00) {
641 /* supervisor call */
642 HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__, (insn|v1) & 0xff);
643 env->psw.addr = ret - 4;
644 env->int_svc_code = (insn|v1) & 0xff;
645 env->int_svc_ilc = 4;
646 helper_exception(EXCP_SVC);
647 } else if ((insn & 0xff00) == 0xbf00) {
648 uint32_t insn2, r1, r3, b2, d2;
649 insn2 = ldl_code(addr + 2);
650 r1 = (insn2 >> 20) & 0xf;
651 r3 = (insn2 >> 16) & 0xf;
652 b2 = (insn2 >> 12) & 0xf;
653 d2 = insn2 & 0xfff;
654 cc = helper_icm(r1, get_address(0, b2, d2), r3);
655 } else {
656 abort:
657 cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
658 insn);
660 return cc;
663 /* absolute value 32-bit */
664 uint32_t HELPER(abs_i32)(int32_t val)
666 if (val < 0) {
667 return -val;
668 } else {
669 return val;
673 /* negative absolute value 32-bit */
674 int32_t HELPER(nabs_i32)(int32_t val)
676 if (val < 0) {
677 return val;
678 } else {
679 return -val;
683 /* absolute value 64-bit */
684 uint64_t HELPER(abs_i64)(int64_t val)
686 HELPER_LOG("%s: val 0x%" PRIx64 "\n", __FUNCTION__, val);
688 if (val < 0) {
689 return -val;
690 } else {
691 return val;
695 /* negative absolute value 64-bit */
696 int64_t HELPER(nabs_i64)(int64_t val)
698 if (val < 0) {
699 return val;
700 } else {
701 return -val;
705 /* add with carry 32-bit unsigned */
706 uint32_t HELPER(addc_u32)(uint32_t cc, uint32_t v1, uint32_t v2)
708 uint32_t res;
710 res = v1 + v2;
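    /* cc values 2 and 3 from the previous logical addition indicate a carry,
       so cc bit 1 is the incoming carry */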
711 if (cc & 2) {
712 res++;
715 return res;
718 /* store character under mask high operates on the upper half of r1 */
719 void HELPER(stcmh)(uint32_t r1, uint64_t address, uint32_t mask)
721 int pos = 56; /* top of the upper half of r1 */
723 while (mask) {
724 if (mask & 8) {
725 stb(address, (env->regs[r1] >> pos) & 0xff);
726 address++;
728 mask = (mask << 1) & 0xf;
729 pos -= 8;
733 /* insert character under mask high; same as icm, but operates on the
734 upper half of r1 */
735 uint32_t HELPER(icmh)(uint32_t r1, uint64_t address, uint32_t mask)
737 int pos = 56; /* top of the upper half of r1 */
738 uint64_t rmask = 0xff00000000000000ULL;
739 uint8_t val = 0;
740 int ccd = 0;
741 uint32_t cc = 0;
743 while (mask) {
744 if (mask & 8) {
745 env->regs[r1] &= ~rmask;
746 val = ldub(address);
747 if ((val & 0x80) && !ccd) {
748 cc = 1;
750 ccd = 1;
751 if (val && cc == 0) {
752 cc = 2;
754 env->regs[r1] |= (uint64_t)val << pos;
755 address++;
757 mask = (mask << 1) & 0xf;
758 pos -= 8;
759 rmask >>= 8;
762 return cc;
765 /* insert psw mask and condition code into r1 */
766 void HELPER(ipm)(uint32_t cc, uint32_t r1)
768 uint64_t r = env->regs[r1];
770 r &= 0xffffffff00ffffffULL;
771     r |= (cc << 28) | (((env->psw.mask >> 40) & 0xf) << 24);
772 env->regs[r1] = r;
773 HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__,
774 cc, env->psw.mask, r);
777 /* load access registers r1 to r3 from memory at a2 */
778 void HELPER(lam)(uint32_t r1, uint64_t a2, uint32_t r3)
780 int i;
782 for (i = r1;; i = (i + 1) % 16) {
783 env->aregs[i] = ldl(a2);
784 a2 += 4;
786 if (i == r3) {
787 break;
792 /* store access registers r1 to r3 in memory at a2 */
793 void HELPER(stam)(uint32_t r1, uint64_t a2, uint32_t r3)
795 int i;
797 for (i = r1;; i = (i + 1) % 16) {
798 stl(a2, env->aregs[i]);
799 a2 += 4;
801 if (i == r3) {
802 break;
807 /* move long */
808 uint32_t HELPER(mvcl)(uint32_t r1, uint32_t r2)
810 uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
811 uint64_t dest = get_address_31fix(r1);
812 uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
813 uint64_t src = get_address_31fix(r2);
814     uint8_t pad = env->regs[r2 + 1] >> 24;
815 uint8_t v;
816 uint32_t cc;
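    /* cc reflects the original lengths: 0 if equal, 1 if the destination is
       shorter, 2 if it is longer */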
818 if (destlen == srclen) {
819 cc = 0;
820 } else if (destlen < srclen) {
821 cc = 1;
822 } else {
823 cc = 2;
826 if (srclen > destlen) {
827 srclen = destlen;
830 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
831 v = ldub(src);
832 stb(dest, v);
835 for (; destlen; dest++, destlen--) {
836 stb(dest, pad);
839 env->regs[r1 + 1] = destlen;
840 /* can't use srclen here, we trunc'ed it */
841 env->regs[r2 + 1] -= src - env->regs[r2];
842 env->regs[r1] = dest;
843 env->regs[r2] = src;
845 return cc;
848 /* move long extended - another memcopy insn with more bells and whistles */
849 uint32_t HELPER(mvcle)(uint32_t r1, uint64_t a2, uint32_t r3)
851 uint64_t destlen = env->regs[r1 + 1];
852 uint64_t dest = env->regs[r1];
853 uint64_t srclen = env->regs[r3 + 1];
854 uint64_t src = env->regs[r3];
855 uint8_t pad = a2 & 0xff;
856 uint8_t v;
857 uint32_t cc;
859 if (!(env->psw.mask & PSW_MASK_64)) {
860 destlen = (uint32_t)destlen;
861 srclen = (uint32_t)srclen;
862 dest &= 0x7fffffff;
863 src &= 0x7fffffff;
866 if (destlen == srclen) {
867 cc = 0;
868 } else if (destlen < srclen) {
869 cc = 1;
870 } else {
871 cc = 2;
874 if (srclen > destlen) {
875 srclen = destlen;
878 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
879 v = ldub(src);
880 stb(dest, v);
883 for (; destlen; dest++, destlen--) {
884 stb(dest, pad);
887 env->regs[r1 + 1] = destlen;
888 /* can't use srclen here, we trunc'ed it */
889 /* FIXME: 31-bit mode! */
890 env->regs[r3 + 1] -= src - env->regs[r3];
891 env->regs[r1] = dest;
892 env->regs[r3] = src;
894 return cc;
897 /* compare logical long extended - memcompare insn with padding */
898 uint32_t HELPER(clcle)(uint32_t r1, uint64_t a2, uint32_t r3)
900 uint64_t destlen = env->regs[r1 + 1];
901 uint64_t dest = get_address_31fix(r1);
902 uint64_t srclen = env->regs[r3 + 1];
903 uint64_t src = get_address_31fix(r3);
904 uint8_t pad = a2 & 0xff;
905     uint8_t v1 = 0, v2 = 0;
906 uint32_t cc = 0;
908 if (!(destlen || srclen)) {
909 return cc;
912 if (srclen > destlen) {
913 srclen = destlen;
916 for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
917 v1 = srclen ? ldub(src) : pad;
918 v2 = destlen ? ldub(dest) : pad;
919 if (v1 != v2) {
920 cc = (v1 < v2) ? 1 : 2;
921 break;
925 env->regs[r1 + 1] = destlen;
926 /* can't use srclen here, we trunc'ed it */
927 env->regs[r3 + 1] -= src - env->regs[r3];
928 env->regs[r1] = dest;
929 env->regs[r3] = src;
931 return cc;
934 /* subtract unsigned v2 from v1 with borrow */
935 uint32_t HELPER(slb)(uint32_t cc, uint32_t r1, uint32_t v2)
937 uint32_t v1 = env->regs[r1];
938 uint32_t res = v1 + (~v2) + (cc >> 1);
940 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | res;
941 if (cc & 2) {
942 /* borrow */
943 return v1 ? 1 : 0;
944 } else {
945 return v1 ? 3 : 2;
949 /* subtract unsigned v2 from v1 with borrow */
950 uint32_t HELPER(slbg)(uint32_t cc, uint32_t r1, uint64_t v1, uint64_t v2)
952 uint64_t res = v1 + (~v2) + (cc >> 1);
954 env->regs[r1] = res;
955 if (cc & 2) {
956 /* borrow */
957 return v1 ? 1 : 0;
958 } else {
959 return v1 ? 3 : 2;
963 static inline int float_comp_to_cc(int float_compare)
965 switch (float_compare) {
966 case float_relation_equal:
967 return 0;
968 case float_relation_less:
969 return 1;
970 case float_relation_greater:
971 return 2;
972 case float_relation_unordered:
973 return 3;
974 default:
975 cpu_abort(env, "unknown return value for float compare\n");
979 /* condition codes for binary FP ops */
980 static uint32_t set_cc_f32(float32 v1, float32 v2)
982 return float_comp_to_cc(float32_compare_quiet(v1, v2, &env->fpu_status));
985 static uint32_t set_cc_f64(float64 v1, float64 v2)
987 return float_comp_to_cc(float64_compare_quiet(v1, v2, &env->fpu_status));
990 /* condition codes for unary FP ops */
991 static uint32_t set_cc_nz_f32(float32 v)
993 if (float32_is_any_nan(v)) {
994 return 3;
995 } else if (float32_is_zero(v)) {
996 return 0;
997 } else if (float32_is_neg(v)) {
998 return 1;
999 } else {
1000 return 2;
1004 static uint32_t set_cc_nz_f64(float64 v)
1006 if (float64_is_any_nan(v)) {
1007 return 3;
1008 } else if (float64_is_zero(v)) {
1009 return 0;
1010 } else if (float64_is_neg(v)) {
1011 return 1;
1012 } else {
1013 return 2;
1017 static uint32_t set_cc_nz_f128(float128 v)
1019 if (float128_is_any_nan(v)) {
1020 return 3;
1021 } else if (float128_is_zero(v)) {
1022 return 0;
1023 } else if (float128_is_neg(v)) {
1024 return 1;
1025 } else {
1026 return 2;
1030 /* convert 32-bit int to 64-bit float */
1031 void HELPER(cdfbr)(uint32_t f1, int32_t v2)
1033 HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__, v2, f1);
1034 env->fregs[f1].d = int32_to_float64(v2, &env->fpu_status);
1037 /* convert 32-bit int to 128-bit float */
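/* 128-bit floats occupy the register pair f and f + 2 throughout this file */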
1038 void HELPER(cxfbr)(uint32_t f1, int32_t v2)
1040 CPU_QuadU v1;
1041 v1.q = int32_to_float128(v2, &env->fpu_status);
1042 env->fregs[f1].ll = v1.ll.upper;
1043 env->fregs[f1 + 2].ll = v1.ll.lower;
1046 /* convert 64-bit int to 32-bit float */
1047 void HELPER(cegbr)(uint32_t f1, int64_t v2)
1049 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
1050 env->fregs[f1].l.upper = int64_to_float32(v2, &env->fpu_status);
1053 /* convert 64-bit int to 64-bit float */
1054 void HELPER(cdgbr)(uint32_t f1, int64_t v2)
1056 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
1057 env->fregs[f1].d = int64_to_float64(v2, &env->fpu_status);
1060 /* convert 64-bit int to 128-bit float */
1061 void HELPER(cxgbr)(uint32_t f1, int64_t v2)
1063 CPU_QuadU x1;
1064 x1.q = int64_to_float128(v2, &env->fpu_status);
1065 HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__, v2,
1066 x1.ll.upper, x1.ll.lower);
1067 env->fregs[f1].ll = x1.ll.upper;
1068 env->fregs[f1 + 2].ll = x1.ll.lower;
1071 /* convert 32-bit int to 32-bit float */
1072 void HELPER(cefbr)(uint32_t f1, int32_t v2)
1074 env->fregs[f1].l.upper = int32_to_float32(v2, &env->fpu_status);
1075 HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__, v2,
1076 env->fregs[f1].l.upper, f1);
1079 /* 32-bit FP addition RR */
1080 uint32_t HELPER(aebr)(uint32_t f1, uint32_t f2)
1082 env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
1083 env->fregs[f2].l.upper,
1084 &env->fpu_status);
1085 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
1086 env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
1088 return set_cc_nz_f32(env->fregs[f1].l.upper);
1091 /* 64-bit FP addition RR */
1092 uint32_t HELPER(adbr)(uint32_t f1, uint32_t f2)
1094 env->fregs[f1].d = float64_add(env->fregs[f1].d, env->fregs[f2].d,
1095 &env->fpu_status);
1096 HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__,
1097 env->fregs[f2].d, env->fregs[f1].d, f1);
1099 return set_cc_nz_f64(env->fregs[f1].d);
1102 /* 32-bit FP subtraction RR */
1103 uint32_t HELPER(sebr)(uint32_t f1, uint32_t f2)
1105 env->fregs[f1].l.upper = float32_sub(env->fregs[f1].l.upper,
1106 env->fregs[f2].l.upper,
1107 &env->fpu_status);
1108 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
1109 env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
1111 return set_cc_nz_f32(env->fregs[f1].l.upper);
1114 /* 64-bit FP subtraction RR */
1115 uint32_t HELPER(sdbr)(uint32_t f1, uint32_t f2)
1117 env->fregs[f1].d = float64_sub(env->fregs[f1].d, env->fregs[f2].d,
1118 &env->fpu_status);
1119 HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
1120 __FUNCTION__, env->fregs[f2].d, env->fregs[f1].d, f1);
1122 return set_cc_nz_f64(env->fregs[f1].d);
1125 /* 32-bit FP division RR */
1126 void HELPER(debr)(uint32_t f1, uint32_t f2)
1128 env->fregs[f1].l.upper = float32_div(env->fregs[f1].l.upper,
1129 env->fregs[f2].l.upper,
1130 &env->fpu_status);
1133 /* 128-bit FP division RR */
1134 void HELPER(dxbr)(uint32_t f1, uint32_t f2)
1136 CPU_QuadU v1;
1137 v1.ll.upper = env->fregs[f1].ll;
1138 v1.ll.lower = env->fregs[f1 + 2].ll;
1139 CPU_QuadU v2;
1140 v2.ll.upper = env->fregs[f2].ll;
1141 v2.ll.lower = env->fregs[f2 + 2].ll;
1142 CPU_QuadU res;
1143 res.q = float128_div(v1.q, v2.q, &env->fpu_status);
1144 env->fregs[f1].ll = res.ll.upper;
1145 env->fregs[f1 + 2].ll = res.ll.lower;
1148 /* 64-bit FP multiplication RR */
1149 void HELPER(mdbr)(uint32_t f1, uint32_t f2)
1151 env->fregs[f1].d = float64_mul(env->fregs[f1].d, env->fregs[f2].d,
1152 &env->fpu_status);
1155 /* 128-bit FP multiplication RR */
1156 void HELPER(mxbr)(uint32_t f1, uint32_t f2)
1158 CPU_QuadU v1;
1159 v1.ll.upper = env->fregs[f1].ll;
1160 v1.ll.lower = env->fregs[f1 + 2].ll;
1161 CPU_QuadU v2;
1162 v2.ll.upper = env->fregs[f2].ll;
1163 v2.ll.lower = env->fregs[f2 + 2].ll;
1164 CPU_QuadU res;
1165 res.q = float128_mul(v1.q, v2.q, &env->fpu_status);
1166 env->fregs[f1].ll = res.ll.upper;
1167 env->fregs[f1 + 2].ll = res.ll.lower;
1170 /* convert 32-bit float to 64-bit float */
1171 void HELPER(ldebr)(uint32_t r1, uint32_t r2)
1173 env->fregs[r1].d = float32_to_float64(env->fregs[r2].l.upper,
1174 &env->fpu_status);
1177 /* convert 128-bit float to 64-bit float */
1178 void HELPER(ldxbr)(uint32_t f1, uint32_t f2)
1180 CPU_QuadU x2;
1181 x2.ll.upper = env->fregs[f2].ll;
1182 x2.ll.lower = env->fregs[f2 + 2].ll;
1183 env->fregs[f1].d = float128_to_float64(x2.q, &env->fpu_status);
1184 HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__, env->fregs[f1].d);
1187 /* convert 64-bit float to 128-bit float */
1188 void HELPER(lxdbr)(uint32_t f1, uint32_t f2)
1190 CPU_QuadU res;
1191 res.q = float64_to_float128(env->fregs[f2].d, &env->fpu_status);
1192 env->fregs[f1].ll = res.ll.upper;
1193 env->fregs[f1 + 2].ll = res.ll.lower;
1196 /* convert 64-bit float to 32-bit float */
1197 void HELPER(ledbr)(uint32_t f1, uint32_t f2)
1199 float64 d2 = env->fregs[f2].d;
1200 env->fregs[f1].l.upper = float64_to_float32(d2, &env->fpu_status);
1203 /* convert 128-bit float to 32-bit float */
1204 void HELPER(lexbr)(uint32_t f1, uint32_t f2)
1206 CPU_QuadU x2;
1207 x2.ll.upper = env->fregs[f2].ll;
1208 x2.ll.lower = env->fregs[f2 + 2].ll;
1209 env->fregs[f1].l.upper = float128_to_float32(x2.q, &env->fpu_status);
1210 HELPER_LOG("%s: to 0x%d\n", __FUNCTION__, env->fregs[f1].l.upper);
1213 /* absolute value of 32-bit float */
1214 uint32_t HELPER(lpebr)(uint32_t f1, uint32_t f2)
1216 float32 v1;
1217     float32 v2 = env->fregs[f2].l.upper;
1218     v1 = float32_abs(v2);
1219     env->fregs[f1].l.upper = v1;
1220 return set_cc_nz_f32(v1);
1223 /* absolute value of 64-bit float */
1224 uint32_t HELPER(lpdbr)(uint32_t f1, uint32_t f2)
1226 float64 v1;
1227 float64 v2 = env->fregs[f2].d;
1228 v1 = float64_abs(v2);
1229 env->fregs[f1].d = v1;
1230 return set_cc_nz_f64(v1);
1233 /* absolute value of 128-bit float */
1234 uint32_t HELPER(lpxbr)(uint32_t f1, uint32_t f2)
1236 CPU_QuadU v1;
1237 CPU_QuadU v2;
1238 v2.ll.upper = env->fregs[f2].ll;
1239 v2.ll.lower = env->fregs[f2 + 2].ll;
1240 v1.q = float128_abs(v2.q);
1241 env->fregs[f1].ll = v1.ll.upper;
1242 env->fregs[f1 + 2].ll = v1.ll.lower;
1243 return set_cc_nz_f128(v1.q);
1246 /* load and test 64-bit float */
1247 uint32_t HELPER(ltdbr)(uint32_t f1, uint32_t f2)
1249 env->fregs[f1].d = env->fregs[f2].d;
1250 return set_cc_nz_f64(env->fregs[f1].d);
1253 /* load and test 32-bit float */
1254 uint32_t HELPER(ltebr)(uint32_t f1, uint32_t f2)
1256 env->fregs[f1].l.upper = env->fregs[f2].l.upper;
1257 return set_cc_nz_f32(env->fregs[f1].l.upper);
1260 /* load and test 128-bit float */
1261 uint32_t HELPER(ltxbr)(uint32_t f1, uint32_t f2)
1263 CPU_QuadU x;
1264 x.ll.upper = env->fregs[f2].ll;
1265 x.ll.lower = env->fregs[f2 + 2].ll;
1266 env->fregs[f1].ll = x.ll.upper;
1267 env->fregs[f1 + 2].ll = x.ll.lower;
1268 return set_cc_nz_f128(x.q);
1271 /* load complement of 32-bit float */
1272 uint32_t HELPER(lcebr)(uint32_t f1, uint32_t f2)
1274 env->fregs[f1].l.upper = float32_chs(env->fregs[f2].l.upper);
1276 return set_cc_nz_f32(env->fregs[f1].l.upper);
1279 /* load complement of 64-bit float */
1280 uint32_t HELPER(lcdbr)(uint32_t f1, uint32_t f2)
1282 env->fregs[f1].d = float64_chs(env->fregs[f2].d);
1284 return set_cc_nz_f64(env->fregs[f1].d);
1287 /* load complement of 128-bit float */
1288 uint32_t HELPER(lcxbr)(uint32_t f1, uint32_t f2)
1290 CPU_QuadU x1, x2;
1291 x2.ll.upper = env->fregs[f2].ll;
1292 x2.ll.lower = env->fregs[f2 + 2].ll;
1293 x1.q = float128_chs(x2.q);
1294 env->fregs[f1].ll = x1.ll.upper;
1295 env->fregs[f1 + 2].ll = x1.ll.lower;
1296 return set_cc_nz_f128(x1.q);
1299 /* 32-bit FP addition RM */
1300 void HELPER(aeb)(uint32_t f1, uint32_t val)
1302 float32 v1 = env->fregs[f1].l.upper;
1303 CPU_FloatU v2;
1304 v2.l = val;
1305 HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__,
1306 v1, f1, v2.f);
1307 env->fregs[f1].l.upper = float32_add(v1, v2.f, &env->fpu_status);
1310 /* 32-bit FP division RM */
1311 void HELPER(deb)(uint32_t f1, uint32_t val)
1313 float32 v1 = env->fregs[f1].l.upper;
1314 CPU_FloatU v2;
1315 v2.l = val;
1316 HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__,
1317 v1, f1, v2.f);
1318 env->fregs[f1].l.upper = float32_div(v1, v2.f, &env->fpu_status);
1321 /* 32-bit FP multiplication RM */
1322 void HELPER(meeb)(uint32_t f1, uint32_t val)
1324 float32 v1 = env->fregs[f1].l.upper;
1325 CPU_FloatU v2;
1326 v2.l = val;
1327 HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__,
1328 v1, f1, v2.f);
1329 env->fregs[f1].l.upper = float32_mul(v1, v2.f, &env->fpu_status);
1332 /* 32-bit FP compare RR */
1333 uint32_t HELPER(cebr)(uint32_t f1, uint32_t f2)
1335 float32 v1 = env->fregs[f1].l.upper;
1336     float32 v2 = env->fregs[f2].l.upper;
1337 HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__,
1338 v1, f1, v2);
1339 return set_cc_f32(v1, v2);
1342 /* 64-bit FP compare RR */
1343 uint32_t HELPER(cdbr)(uint32_t f1, uint32_t f2)
1345 float64 v1 = env->fregs[f1].d;
1346     float64 v2 = env->fregs[f2].d;
1347 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__,
1348 v1, f1, v2);
1349 return set_cc_f64(v1, v2);
1352 /* 128-bit FP compare RR */
1353 uint32_t HELPER(cxbr)(uint32_t f1, uint32_t f2)
1355 CPU_QuadU v1;
1356 v1.ll.upper = env->fregs[f1].ll;
1357 v1.ll.lower = env->fregs[f1 + 2].ll;
1358 CPU_QuadU v2;
1359 v2.ll.upper = env->fregs[f2].ll;
1360 v2.ll.lower = env->fregs[f2 + 2].ll;
1362 return float_comp_to_cc(float128_compare_quiet(v1.q, v2.q,
1363 &env->fpu_status));
1366 /* 64-bit FP compare RM */
1367 uint32_t HELPER(cdb)(uint32_t f1, uint64_t a2)
1369 float64 v1 = env->fregs[f1].d;
1370 CPU_DoubleU v2;
1371 v2.ll = ldq(a2);
1372 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__, v1,
1373 f1, v2.d);
1374 return set_cc_f64(v1, v2.d);
1377 /* 64-bit FP addition RM */
1378 uint32_t HELPER(adb)(uint32_t f1, uint64_t a2)
1380 float64 v1 = env->fregs[f1].d;
1381 CPU_DoubleU v2;
1382 v2.ll = ldq(a2);
1383 HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__,
1384 v1, f1, v2.d);
1385 env->fregs[f1].d = v1 = float64_add(v1, v2.d, &env->fpu_status);
1386 return set_cc_nz_f64(v1);
1389 /* 32-bit FP subtraction RM */
1390 void HELPER(seb)(uint32_t f1, uint32_t val)
1392 float32 v1 = env->fregs[f1].l.upper;
1393 CPU_FloatU v2;
1394 v2.l = val;
1395 env->fregs[f1].l.upper = float32_sub(v1, v2.f, &env->fpu_status);
1398 /* 64-bit FP subtraction RM */
1399 uint32_t HELPER(sdb)(uint32_t f1, uint64_t a2)
1401 float64 v1 = env->fregs[f1].d;
1402 CPU_DoubleU v2;
1403 v2.ll = ldq(a2);
1404 env->fregs[f1].d = v1 = float64_sub(v1, v2.d, &env->fpu_status);
1405 return set_cc_nz_f64(v1);
1408 /* 64-bit FP multiplication RM */
1409 void HELPER(mdb)(uint32_t f1, uint64_t a2)
1411 float64 v1 = env->fregs[f1].d;
1412 CPU_DoubleU v2;
1413 v2.ll = ldq(a2);
1414 HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__,
1415 v1, f1, v2.d);
1416 env->fregs[f1].d = float64_mul(v1, v2.d, &env->fpu_status);
1419 /* 64-bit FP division RM */
1420 void HELPER(ddb)(uint32_t f1, uint64_t a2)
1422 float64 v1 = env->fregs[f1].d;
1423 CPU_DoubleU v2;
1424 v2.ll = ldq(a2);
1425 HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__,
1426 v1, f1, v2.d);
1427 env->fregs[f1].d = float64_div(v1, v2.d, &env->fpu_status);
1430 static void set_round_mode(int m3)
1432 switch (m3) {
1433 case 0:
1434 /* current mode */
1435 break;
1436 case 1:
1437     /* biased round to nearest */
1438 case 4:
1439 /* round to nearest */
1440 set_float_rounding_mode(float_round_nearest_even, &env->fpu_status);
1441 break;
1442 case 5:
1443 /* round to zero */
1444 set_float_rounding_mode(float_round_to_zero, &env->fpu_status);
1445 break;
1446 case 6:
1447 /* round to +inf */
1448 set_float_rounding_mode(float_round_up, &env->fpu_status);
1449 break;
1450 case 7:
1451 /* round to -inf */
1452 set_float_rounding_mode(float_round_down, &env->fpu_status);
1453 break;
1457 /* convert 32-bit float to 64-bit int */
1458 uint32_t HELPER(cgebr)(uint32_t r1, uint32_t f2, uint32_t m3)
1460 float32 v2 = env->fregs[f2].l.upper;
1461 set_round_mode(m3);
1462 env->regs[r1] = float32_to_int64(v2, &env->fpu_status);
1463 return set_cc_nz_f32(v2);
1466 /* convert 64-bit float to 64-bit int */
1467 uint32_t HELPER(cgdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1469 float64 v2 = env->fregs[f2].d;
1470 set_round_mode(m3);
1471 env->regs[r1] = float64_to_int64(v2, &env->fpu_status);
1472 return set_cc_nz_f64(v2);
1475 /* convert 128-bit float to 64-bit int */
1476 uint32_t HELPER(cgxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1478 CPU_QuadU v2;
1479 v2.ll.upper = env->fregs[f2].ll;
1480 v2.ll.lower = env->fregs[f2 + 2].ll;
1481 set_round_mode(m3);
1482 env->regs[r1] = float128_to_int64(v2.q, &env->fpu_status);
1483 if (float128_is_any_nan(v2.q)) {
1484 return 3;
1485 } else if (float128_is_zero(v2.q)) {
1486 return 0;
1487 } else if (float128_is_neg(v2.q)) {
1488 return 1;
1489 } else {
1490 return 2;
1494 /* convert 32-bit float to 32-bit int */
1495 uint32_t HELPER(cfebr)(uint32_t r1, uint32_t f2, uint32_t m3)
1497 float32 v2 = env->fregs[f2].l.upper;
1498 set_round_mode(m3);
1499 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1500 float32_to_int32(v2, &env->fpu_status);
1501 return set_cc_nz_f32(v2);
1504 /* convert 64-bit float to 32-bit int */
1505 uint32_t HELPER(cfdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1507 float64 v2 = env->fregs[f2].d;
1508 set_round_mode(m3);
1509 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1510 float64_to_int32(v2, &env->fpu_status);
1511 return set_cc_nz_f64(v2);
1514 /* convert 128-bit float to 32-bit int */
1515 uint32_t HELPER(cfxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1517 CPU_QuadU v2;
1518 v2.ll.upper = env->fregs[f2].ll;
1519 v2.ll.lower = env->fregs[f2 + 2].ll;
1520 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1521 float128_to_int32(v2.q, &env->fpu_status);
1522 return set_cc_nz_f128(v2.q);
1525 /* load 32-bit FP zero */
1526 void HELPER(lzer)(uint32_t f1)
1528 env->fregs[f1].l.upper = float32_zero;
1531 /* load 64-bit FP zero */
1532 void HELPER(lzdr)(uint32_t f1)
1534 env->fregs[f1].d = float64_zero;
1537 /* load 128-bit FP zero */
1538 void HELPER(lzxr)(uint32_t f1)
1540 CPU_QuadU x;
1541 x.q = float64_to_float128(float64_zero, &env->fpu_status);
1542 env->fregs[f1].ll = x.ll.upper;
1543     env->fregs[f1 + 2].ll = x.ll.lower;
1546 /* 128-bit FP subtraction RR */
1547 uint32_t HELPER(sxbr)(uint32_t f1, uint32_t f2)
1549 CPU_QuadU v1;
1550 v1.ll.upper = env->fregs[f1].ll;
1551 v1.ll.lower = env->fregs[f1 + 2].ll;
1552 CPU_QuadU v2;
1553 v2.ll.upper = env->fregs[f2].ll;
1554 v2.ll.lower = env->fregs[f2 + 2].ll;
1555 CPU_QuadU res;
1556 res.q = float128_sub(v1.q, v2.q, &env->fpu_status);
1557 env->fregs[f1].ll = res.ll.upper;
1558 env->fregs[f1 + 2].ll = res.ll.lower;
1559 return set_cc_nz_f128(res.q);
1562 /* 128-bit FP addition RR */
1563 uint32_t HELPER(axbr)(uint32_t f1, uint32_t f2)
1565 CPU_QuadU v1;
1566 v1.ll.upper = env->fregs[f1].ll;
1567 v1.ll.lower = env->fregs[f1 + 2].ll;
1568 CPU_QuadU v2;
1569 v2.ll.upper = env->fregs[f2].ll;
1570 v2.ll.lower = env->fregs[f2 + 2].ll;
1571 CPU_QuadU res;
1572 res.q = float128_add(v1.q, v2.q, &env->fpu_status);
1573 env->fregs[f1].ll = res.ll.upper;
1574 env->fregs[f1 + 2].ll = res.ll.lower;
1575 return set_cc_nz_f128(res.q);
1578 /* 32-bit FP multiplication RR */
1579 void HELPER(meebr)(uint32_t f1, uint32_t f2)
1581 env->fregs[f1].l.upper = float32_mul(env->fregs[f1].l.upper,
1582 env->fregs[f2].l.upper,
1583 &env->fpu_status);
1586 /* 64-bit FP division RR */
1587 void HELPER(ddbr)(uint32_t f1, uint32_t f2)
1589 env->fregs[f1].d = float64_div(env->fregs[f1].d, env->fregs[f2].d,
1590 &env->fpu_status);
1593 /* 64-bit FP multiply and add RM */
1594 void HELPER(madb)(uint32_t f1, uint64_t a2, uint32_t f3)
1596 HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__, f1, a2, f3);
1597 CPU_DoubleU v2;
1598 v2.ll = ldq(a2);
1599 env->fregs[f1].d = float64_add(env->fregs[f1].d,
1600 float64_mul(v2.d, env->fregs[f3].d,
1601 &env->fpu_status),
1602 &env->fpu_status);
1605 /* 64-bit FP multiply and add RR */
1606 void HELPER(madbr)(uint32_t f1, uint32_t f3, uint32_t f2)
1608 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
1609 env->fregs[f1].d = float64_add(float64_mul(env->fregs[f2].d,
1610 env->fregs[f3].d,
1611 &env->fpu_status),
1612 env->fregs[f1].d, &env->fpu_status);
1615 /* 64-bit FP multiply and subtract RR */
1616 void HELPER(msdbr)(uint32_t f1, uint32_t f3, uint32_t f2)
1618 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
1619 env->fregs[f1].d = float64_sub(float64_mul(env->fregs[f2].d,
1620 env->fregs[f3].d,
1621 &env->fpu_status),
1622 env->fregs[f1].d, &env->fpu_status);
1625 /* 32-bit FP multiply and add RR */
1626 void HELPER(maebr)(uint32_t f1, uint32_t f3, uint32_t f2)
1628 env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
1629 float32_mul(env->fregs[f2].l.upper,
1630 env->fregs[f3].l.upper,
1631 &env->fpu_status),
1632 &env->fpu_status);
1635 /* convert 64-bit float to 128-bit float */
1636 void HELPER(lxdb)(uint32_t f1, uint64_t a2)
1638 CPU_DoubleU v2;
1639 v2.ll = ldq(a2);
1640 CPU_QuadU v1;
1641 v1.q = float64_to_float128(v2.d, &env->fpu_status);
1642 env->fregs[f1].ll = v1.ll.upper;
1643 env->fregs[f1 + 2].ll = v1.ll.lower;
1646 /* test data class 32-bit */
1647 uint32_t HELPER(tceb)(uint32_t f1, uint64_t m2)
1649 float32 v1 = env->fregs[f1].l.upper;
1650 int neg = float32_is_neg(v1);
1651 uint32_t cc = 0;
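    /* m2 is the data-class mask; each class (zero, normal, infinity, NaN,
       signaling NaN) has a positive/negative bit pair and `neg` selects the
       negative member of the pair. */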
1653 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, (long)v1, m2, neg);
1654 if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
1655 (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
1656 (float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
1657 (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
1658 cc = 1;
1659 } else if (m2 & (1 << (9-neg))) {
1660 /* assume normalized number */
1661 cc = 1;
1664 /* FIXME: denormalized? */
1665 return cc;
1668 /* test data class 64-bit */
1669 uint32_t HELPER(tcdb)(uint32_t f1, uint64_t m2)
1671 float64 v1 = env->fregs[f1].d;
1672 int neg = float64_is_neg(v1);
1673 uint32_t cc = 0;
1675 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, v1, m2, neg);
1676 if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
1677 (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
1678 (float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
1679 (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
1680 cc = 1;
1681 } else if (m2 & (1 << (9-neg))) {
1682 /* assume normalized number */
1683 cc = 1;
1685 /* FIXME: denormalized? */
1686 return cc;
1689 /* test data class 128-bit */
1690 uint32_t HELPER(tcxb)(uint32_t f1, uint64_t m2)
1692 CPU_QuadU v1;
1693 uint32_t cc = 0;
1694 v1.ll.upper = env->fregs[f1].ll;
1695 v1.ll.lower = env->fregs[f1 + 2].ll;
1697 int neg = float128_is_neg(v1.q);
1698 if ((float128_is_zero(v1.q) && (m2 & (1 << (11-neg)))) ||
1699 (float128_is_infinity(v1.q) && (m2 & (1 << (5-neg)))) ||
1700 (float128_is_any_nan(v1.q) && (m2 & (1 << (3-neg)))) ||
1701 (float128_is_signaling_nan(v1.q) && (m2 & (1 << (1-neg))))) {
1702 cc = 1;
1703 } else if (m2 & (1 << (9-neg))) {
1704 /* assume normalized number */
1705 cc = 1;
1707 /* FIXME: denormalized? */
1708 return cc;
1711 /* find leftmost one */
1712 uint32_t HELPER(flogr)(uint32_t r1, uint64_t v2)
1714 uint64_t res = 0;
1715 uint64_t ov2 = v2;
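    /* count leading zero bits by shifting left until the top bit is set or
       the value becomes zero */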
1717 while (!(v2 & 0x8000000000000000ULL) && v2) {
1718 v2 <<= 1;
1719 res++;
1722 if (!v2) {
1723 env->regs[r1] = 64;
1724 env->regs[r1 + 1] = 0;
1725 return 0;
1726 } else {
1727 env->regs[r1] = res;
1728 env->regs[r1 + 1] = ov2 & ~(0x8000000000000000ULL >> res);
1729 return 2;
1733 /* square root 64-bit RR */
1734 void HELPER(sqdbr)(uint32_t f1, uint32_t f2)
1736 env->fregs[f1].d = float64_sqrt(env->fregs[f2].d, &env->fpu_status);
1739 /* checksum */
1740 void HELPER(cksm)(uint32_t r1, uint32_t r2)
1742 uint64_t src = get_address_31fix(r2);
1743 uint64_t src_len = env->regs[(r2 + 1) & 15];
1744 uint64_t cksm = (uint32_t)env->regs[r1];
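    /* sum the operand as 32-bit words into a 64-bit accumulator; the carries
       gathered in the upper half are folded in once when the result is stored */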
1746 while (src_len >= 4) {
1747 cksm += ldl(src);
1749 /* move to next word */
1750 src_len -= 4;
1751 src += 4;
1754 switch (src_len) {
1755 case 0:
1756 break;
1757 case 1:
1758 cksm += ldub(src) << 24;
1759 break;
1760 case 2:
1761 cksm += lduw(src) << 16;
1762 break;
1763 case 3:
1764 cksm += lduw(src) << 16;
1765 cksm += ldub(src + 2) << 8;
1766 break;
1769 /* indicate we've processed everything */
1770 env->regs[r2] = src + src_len;
1771 env->regs[(r2 + 1) & 15] = 0;
1773 /* store result */
1774 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1775 ((uint32_t)cksm + (cksm >> 32));
1778 static inline uint32_t cc_calc_ltgt_32(CPUState *env, int32_t src,
1779 int32_t dst)
1781 if (src == dst) {
1782 return 0;
1783 } else if (src < dst) {
1784 return 1;
1785 } else {
1786 return 2;
1790 static inline uint32_t cc_calc_ltgt0_32(CPUState *env, int32_t dst)
1792 return cc_calc_ltgt_32(env, dst, 0);
1795 static inline uint32_t cc_calc_ltgt_64(CPUState *env, int64_t src,
1796 int64_t dst)
1798 if (src == dst) {
1799 return 0;
1800 } else if (src < dst) {
1801 return 1;
1802 } else {
1803 return 2;
1807 static inline uint32_t cc_calc_ltgt0_64(CPUState *env, int64_t dst)
1809 return cc_calc_ltgt_64(env, dst, 0);
1812 static inline uint32_t cc_calc_ltugtu_32(CPUState *env, uint32_t src,
1813 uint32_t dst)
1815 if (src == dst) {
1816 return 0;
1817 } else if (src < dst) {
1818 return 1;
1819 } else {
1820 return 2;
1824 static inline uint32_t cc_calc_ltugtu_64(CPUState *env, uint64_t src,
1825 uint64_t dst)
1827 if (src == dst) {
1828 return 0;
1829 } else if (src < dst) {
1830 return 1;
1831 } else {
1832 return 2;
1836 static inline uint32_t cc_calc_tm_32(CPUState *env, uint32_t val, uint32_t mask)
1838 HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__, val, mask);
1839 uint16_t r = val & mask;
1840 if (r == 0 || mask == 0) {
1841 return 0;
1842 } else if (r == mask) {
1843 return 3;
1844 } else {
1845 return 1;
1849 /* set condition code for test under mask */
1850 static inline uint32_t cc_calc_tm_64(CPUState *env, uint64_t val, uint32_t mask)
1852 uint16_t r = val & mask;
1853 HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__, val, mask, r);
1854 if (r == 0 || mask == 0) {
1855 return 0;
1856 } else if (r == mask) {
1857 return 3;
1858 } else {
1859 while (!(mask & 0x8000)) {
1860 mask <<= 1;
1861 val <<= 1;
1863 if (val & 0x8000) {
1864 return 2;
1865 } else {
1866 return 1;
1871 static inline uint32_t cc_calc_nz(CPUState *env, uint64_t dst)
1873 return !!dst;
1876 static inline uint32_t cc_calc_add_64(CPUState *env, int64_t a1, int64_t a2,
1877 int64_t ar)
1879 if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
1880 return 3; /* overflow */
1881 } else {
1882 if (ar < 0) {
1883 return 1;
1884 } else if (ar > 0) {
1885 return 2;
1886 } else {
1887 return 0;
1892 static inline uint32_t cc_calc_addu_64(CPUState *env, uint64_t a1, uint64_t a2,
1893 uint64_t ar)
1895 if (ar == 0) {
1896 if (a1) {
1897 return 2;
1898 } else {
1899 return 0;
1901 } else {
1902 if (ar < a1 || ar < a2) {
1903 return 3;
1904 } else {
1905 return 1;
1910 static inline uint32_t cc_calc_sub_64(CPUState *env, int64_t a1, int64_t a2,
1911 int64_t ar)
1913 if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
1914 return 3; /* overflow */
1915 } else {
1916 if (ar < 0) {
1917 return 1;
1918 } else if (ar > 0) {
1919 return 2;
1920 } else {
1921 return 0;
1926 static inline uint32_t cc_calc_subu_64(CPUState *env, uint64_t a1, uint64_t a2,
1927 uint64_t ar)
1929 if (ar == 0) {
1930 return 2;
1931 } else {
1932 if (a2 > a1) {
1933 return 1;
1934 } else {
1935 return 3;
1940 static inline uint32_t cc_calc_abs_64(CPUState *env, int64_t dst)
1942 if ((uint64_t)dst == 0x8000000000000000ULL) {
1943 return 3;
1944 } else if (dst) {
1945 return 1;
1946 } else {
1947 return 0;
1951 static inline uint32_t cc_calc_nabs_64(CPUState *env, int64_t dst)
1953 return !!dst;
1956 static inline uint32_t cc_calc_comp_64(CPUState *env, int64_t dst)
1958 if ((uint64_t)dst == 0x8000000000000000ULL) {
1959 return 3;
1960 } else if (dst < 0) {
1961 return 1;
1962 } else if (dst > 0) {
1963 return 2;
1964 } else {
1965 return 0;
1970 static inline uint32_t cc_calc_add_32(CPUState *env, int32_t a1, int32_t a2,
1971 int32_t ar)
1973 if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
1974 return 3; /* overflow */
1975 } else {
1976 if (ar < 0) {
1977 return 1;
1978 } else if (ar > 0) {
1979 return 2;
1980 } else {
1981 return 0;
1986 static inline uint32_t cc_calc_addu_32(CPUState *env, uint32_t a1, uint32_t a2,
1987 uint32_t ar)
1989 if (ar == 0) {
1990 if (a1) {
1991 return 2;
1992 } else {
1993 return 0;
1995 } else {
1996 if (ar < a1 || ar < a2) {
1997 return 3;
1998 } else {
1999 return 1;
2004 static inline uint32_t cc_calc_sub_32(CPUState *env, int32_t a1, int32_t a2,
2005 int32_t ar)
2007 if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
2008 return 3; /* overflow */
2009 } else {
2010 if (ar < 0) {
2011 return 1;
2012 } else if (ar > 0) {
2013 return 2;
2014 } else {
2015 return 0;
2020 static inline uint32_t cc_calc_subu_32(CPUState *env, uint32_t a1, uint32_t a2,
2021 uint32_t ar)
2023 if (ar == 0) {
2024 return 2;
2025 } else {
2026 if (a2 > a1) {
2027 return 1;
2028 } else {
2029 return 3;
2034 static inline uint32_t cc_calc_abs_32(CPUState *env, int32_t dst)
2036 if ((uint32_t)dst == 0x80000000UL) {
2037 return 3;
2038 } else if (dst) {
2039 return 1;
2040 } else {
2041 return 0;
2045 static inline uint32_t cc_calc_nabs_32(CPUState *env, int32_t dst)
2047 return !!dst;
2050 static inline uint32_t cc_calc_comp_32(CPUState *env, int32_t dst)
2052 if ((uint32_t)dst == 0x80000000UL) {
2053 return 3;
2054 } else if (dst < 0) {
2055 return 1;
2056 } else if (dst > 0) {
2057 return 2;
2058 } else {
2059 return 0;
2063 /* calculate condition code for insert character under mask insn */
2064 static inline uint32_t cc_calc_icm_32(CPUState *env, uint32_t mask, uint32_t val)
2066 HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__, mask, val);
2067 uint32_t cc;
2069 if (mask == 0xf) {
2070 if (!val) {
2071 return 0;
2072 } else if (val & 0x80000000) {
2073 return 1;
2074 } else {
2075 return 2;
2079 if (!val || !mask) {
2080 cc = 0;
2081 } else {
2082 while (mask != 1) {
2083 mask >>= 1;
2084 val >>= 8;
2086 if (val & 0x80) {
2087 cc = 1;
2088 } else {
2089 cc = 2;
2092 return cc;
2095 static inline uint32_t cc_calc_slag(CPUState *env, uint64_t src, uint64_t shift)
2097 uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
2098 uint64_t match, r;
2100 /* check if the sign bit stays the same */
2101 if (src & (1ULL << 63)) {
2102 match = mask;
2103 } else {
2104 match = 0;
2107 if ((src & mask) != match) {
2108 /* overflow */
2109 return 3;
2112 r = ((src << shift) & ((1ULL << 63) - 1)) | (src & (1ULL << 63));
2114 if ((int64_t)r == 0) {
2115 return 0;
2116 } else if ((int64_t)r < 0) {
2117 return 1;
2120 return 2;
2124 static inline uint32_t do_calc_cc(CPUState *env, uint32_t cc_op, uint64_t src,
2125 uint64_t dst, uint64_t vr)
2127 uint32_t r = 0;
2129 switch (cc_op) {
2130 case CC_OP_CONST0:
2131 case CC_OP_CONST1:
2132 case CC_OP_CONST2:
2133 case CC_OP_CONST3:
2134 /* cc_op value _is_ cc */
2135 r = cc_op;
2136 break;
2137 case CC_OP_LTGT0_32:
2138 r = cc_calc_ltgt0_32(env, dst);
2139 break;
2140 case CC_OP_LTGT0_64:
2141 r = cc_calc_ltgt0_64(env, dst);
2142 break;
2143 case CC_OP_LTGT_32:
2144 r = cc_calc_ltgt_32(env, src, dst);
2145 break;
2146 case CC_OP_LTGT_64:
2147 r = cc_calc_ltgt_64(env, src, dst);
2148 break;
2149 case CC_OP_LTUGTU_32:
2150 r = cc_calc_ltugtu_32(env, src, dst);
2151 break;
2152 case CC_OP_LTUGTU_64:
2153 r = cc_calc_ltugtu_64(env, src, dst);
2154 break;
2155 case CC_OP_TM_32:
2156 r = cc_calc_tm_32(env, src, dst);
2157 break;
2158 case CC_OP_TM_64:
2159 r = cc_calc_tm_64(env, src, dst);
2160 break;
2161 case CC_OP_NZ:
2162 r = cc_calc_nz(env, dst);
2163 break;
2164 case CC_OP_ADD_64:
2165 r = cc_calc_add_64(env, src, dst, vr);
2166 break;
2167 case CC_OP_ADDU_64:
2168 r = cc_calc_addu_64(env, src, dst, vr);
2169 break;
2170 case CC_OP_SUB_64:
2171 r = cc_calc_sub_64(env, src, dst, vr);
2172 break;
2173 case CC_OP_SUBU_64:
2174 r = cc_calc_subu_64(env, src, dst, vr);
2175 break;
2176 case CC_OP_ABS_64:
2177 r = cc_calc_abs_64(env, dst);
2178 break;
2179 case CC_OP_NABS_64:
2180 r = cc_calc_nabs_64(env, dst);
2181 break;
2182 case CC_OP_COMP_64:
2183 r = cc_calc_comp_64(env, dst);
2184 break;
2186 case CC_OP_ADD_32:
2187 r = cc_calc_add_32(env, src, dst, vr);
2188 break;
2189 case CC_OP_ADDU_32:
2190 r = cc_calc_addu_32(env, src, dst, vr);
2191 break;
2192 case CC_OP_SUB_32:
2193 r = cc_calc_sub_32(env, src, dst, vr);
2194 break;
2195 case CC_OP_SUBU_32:
2196 r = cc_calc_subu_32(env, src, dst, vr);
2197 break;
2198 case CC_OP_ABS_32:
2199         r = cc_calc_abs_32(env, dst);
2200         break;
2201     case CC_OP_NABS_32:
2202         r = cc_calc_nabs_32(env, dst);
2203 break;
2204 case CC_OP_COMP_32:
2205 r = cc_calc_comp_32(env, dst);
2206 break;
2208 case CC_OP_ICM:
2209 r = cc_calc_icm_32(env, src, dst);
2210 break;
2211 case CC_OP_SLAG:
2212 r = cc_calc_slag(env, src, dst);
2213 break;
2215 case CC_OP_LTGT_F32:
2216 r = set_cc_f32(src, dst);
2217 break;
2218 case CC_OP_LTGT_F64:
2219 r = set_cc_f64(src, dst);
2220 break;
2221 case CC_OP_NZ_F32:
2222 r = set_cc_nz_f32(dst);
2223 break;
2224 case CC_OP_NZ_F64:
2225 r = set_cc_nz_f64(dst);
2226 break;
2228 default:
2229 cpu_abort(env, "Unknown CC operation: %s\n", cc_name(cc_op));
2232 HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__,
2233 cc_name(cc_op), src, dst, vr, r);
2234 return r;
2237 uint32_t calc_cc(CPUState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
2238 uint64_t vr)
2240 return do_calc_cc(env, cc_op, src, dst, vr);
2243 uint32_t HELPER(calc_cc)(uint32_t cc_op, uint64_t src, uint64_t dst,
2244 uint64_t vr)
2246 return do_calc_cc(env, cc_op, src, dst, vr);
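/* convert to decimal (CVD): build a packed-decimal doubleword whose low nibble
   is the sign code (0xc positive, 0xd negative) and whose higher nibbles hold
   the decimal digits, least significant digit first */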
2249 uint64_t HELPER(cvd)(int32_t bin)
2251 /* positive 0 */
2252 uint64_t dec = 0x0c;
2253 int shift = 4;
2255 if (bin < 0) {
2256 bin = -bin;
2257 dec = 0x0d;
2260 for (shift = 4; (shift < 64) && bin; shift += 4) {
2261 int current_number = bin % 10;
2263         dec |= ((uint64_t)current_number) << shift;
2264 bin /= 10;
2267 return dec;
2270 void HELPER(unpk)(uint32_t len, uint64_t dest, uint64_t src)
2272 int len_dest = len >> 4;
2273 int len_src = len & 0xf;
2274 uint8_t b;
2275 int second_nibble = 0;
2277 dest += len_dest;
2278 src += len_src;
2280 /* last byte is special, it only flips the nibbles */
2281 b = ldub(src);
2282 stb(dest, (b << 4) | (b >> 4));
2283 src--;
2284 len_src--;
2286 /* now pad every nibble with 0xf0 */
2288 while (len_dest > 0) {
2289 uint8_t cur_byte = 0;
2291 if (len_src > 0) {
2292 cur_byte = ldub(src);
2295 len_dest--;
2296 dest--;
2298 /* only advance one nibble at a time */
2299 if (second_nibble) {
2300 cur_byte >>= 4;
2301 len_src--;
2302 src--;
2304 second_nibble = !second_nibble;
2306 /* digit */
2307 cur_byte = (cur_byte & 0xf);
2308 /* zone bits */
2309 cur_byte |= 0xf0;
2311 stb(dest, cur_byte);
2315 void HELPER(tr)(uint32_t len, uint64_t array, uint64_t trans)
2317 int i;
2319 for (i = 0; i <= len; i++) {
2320 uint8_t byte = ldub(array + i);
2321 uint8_t new_byte = ldub(trans + byte);
2322 stb(array + i, new_byte);
2326 #ifndef CONFIG_USER_ONLY
2328 void HELPER(load_psw)(uint64_t mask, uint64_t addr)
2330 load_psw(env, mask, addr);
2331 cpu_loop_exit(env);
2334 static void program_interrupt(CPUState *env, uint32_t code, int ilc)
2336 qemu_log("program interrupt at %#" PRIx64 "\n", env->psw.addr);
2338 if (kvm_enabled()) {
2339 #ifdef CONFIG_KVM
2340 kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code);
2341 #endif
2342 } else {
2343 env->int_pgm_code = code;
2344 env->int_pgm_ilc = ilc;
2345 env->exception_index = EXCP_PGM;
2346 cpu_loop_exit(env);
2350 static void ext_interrupt(CPUState *env, int type, uint32_t param,
2351 uint64_t param64)
2353 cpu_inject_ext(env, type, param, param64);
2356 int sclp_service_call(CPUState *env, uint32_t sccb, uint64_t code)
2358 int r = 0;
2359 int shift = 0;
2361 #ifdef DEBUG_HELPER
2362 printf("sclp(0x%x, 0x%" PRIx64 ")\n", sccb, code);
2363 #endif
2365 if (sccb & ~0x7ffffff8ul) {
2366 fprintf(stderr, "KVM: invalid sccb address 0x%x\n", sccb);
2367 r = -1;
2368 goto out;
2371 switch(code) {
2372 case SCLP_CMDW_READ_SCP_INFO:
2373 case SCLP_CMDW_READ_SCP_INFO_FORCED:
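        /* report memory as a number of increments of (1 << shift) MB, scaling
           the increment size so the count fits into the 16-bit field */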
2374 while ((ram_size >> (20 + shift)) > 65535) {
2375 shift++;
2377 stw_phys(sccb + SCP_MEM_CODE, ram_size >> (20 + shift));
2378 stb_phys(sccb + SCP_INCREMENT, 1 << shift);
2379 stw_phys(sccb + SCP_RESPONSE_CODE, 0x10);
2381 if (kvm_enabled()) {
2382 #ifdef CONFIG_KVM
2383 kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE,
2384 sccb & ~3, 0, 1);
2385 #endif
2386 } else {
2387 env->psw.addr += 4;
2388 ext_interrupt(env, EXT_SERVICE, sccb & ~3, 0);
2390 break;
2391 default:
2392 #ifdef DEBUG_HELPER
2393 printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64 "x\n", sccb, code);
2394 #endif
2395 r = -1;
2396 break;
2399 out:
2400 return r;
2403 /* SCLP service call */
2404 uint32_t HELPER(servc)(uint32_t r1, uint64_t r2)
2406 if (sclp_service_call(env, r1, r2)) {
2407 return 3;
2410 return 0;
2413 /* DIAG */
2414 uint64_t HELPER(diag)(uint32_t num, uint64_t mem, uint64_t code)
2416 uint64_t r;
2418 switch (num) {
2419 case 0x500:
2420 /* KVM hypercall */
2421 r = s390_virtio_hypercall(env, mem, code);
2422 break;
2423 case 0x44:
2424 /* yield */
2425 r = 0;
2426 break;
2427 case 0x308:
2428 /* ipl */
2429 r = 0;
2430 break;
2431 default:
2432 r = -1;
2433 break;
2436 if (r) {
2437 program_interrupt(env, PGM_OPERATION, ILC_LATER_INC);
2440 return r;
2443 /* Store CPU ID */
2444 void HELPER(stidp)(uint64_t a1)
2446 stq(a1, env->cpu_num);
2449 /* Set Prefix */
2450 void HELPER(spx)(uint64_t a1)
2452 uint32_t prefix;
2454 prefix = ldl(a1);
2455 env->psa = prefix & 0xfffff000;
2456 qemu_log("prefix: %#x\n", prefix);
2457 tlb_flush_page(env, 0);
2458 tlb_flush_page(env, TARGET_PAGE_SIZE);
2461 /* Set Clock */
2462 uint32_t HELPER(sck)(uint64_t a1)
2464 /* XXX not implemented - is it necessary? */
2466 return 0;
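/* current guest TOD clock: the saved TOD offset plus the nanoseconds
   elapsed since tod_basetime, converted by time2tod() into TOD units
   (one unit is 2^-12 microseconds) */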
2469 static inline uint64_t clock_value(CPUState *env)
2471 uint64_t time;
2473 time = env->tod_offset +
2474 time2tod(qemu_get_clock_ns(vm_clock) - env->tod_basetime);
2476 return time;
2479 /* Store Clock */
2480 uint32_t HELPER(stck)(uint64_t a1)
2482 stq(a1, clock_value(env));
2484 return 0;
2487 /* Store Clock Extended */
2488 uint32_t HELPER(stcke)(uint64_t a1)
2490 stb(a1, 0);
2491 /* basically the same value as stck */
2492 stq(a1 + 1, clock_value(env) | env->cpu_num);
/* low-order clock bits, finer grained than stck (bytes 9-13) */
stl(a1 + 9, 0);
stb(a1 + 13, 0);
/* XXX programmable field (bytes 14-15 of the 16-byte operand) */
stw(a1 + 14, 0);
2499 return 0;
2502 /* Set Clock Comparator */
2503 void HELPER(sckc)(uint64_t a1)
2505 uint64_t time = ldq(a1);
2507 if (time == -1ULL) {
2508 return;
2511 /* difference between now and then */
2512 time -= clock_value(env);
/* TOD units to nanoseconds: one unit is 1000/4096 ns, i.e. multiply by 125 and shift right by 9 */
2514 time = (time * 125) >> 9;
2516 qemu_mod_timer(env->tod_timer, qemu_get_clock_ns(vm_clock) + time);
2519 /* Store Clock Comparator */
2520 void HELPER(stckc)(uint64_t a1)
2522 /* XXX implement */
2523 stq(a1, 0);
2526 /* Set CPU Timer */
2527 void HELPER(spt)(uint64_t a1)
2529 uint64_t time = ldq(a1);
2531 if (time == -1ULL) {
2532 return;
/* CPU timer units to nanoseconds: same 125/512 scaling as the TOD clock */
2536 time = (time * 125) >> 9;
2538 qemu_mod_timer(env->cpu_timer, qemu_get_clock_ns(vm_clock) + time);
2541 /* Store CPU Timer */
2542 void HELPER(stpt)(uint64_t a1)
2544 /* XXX implement */
2545 stq(a1, 0);
2548 /* Store System Information */
2549 uint32_t HELPER(stsi)(uint64_t a0, uint32_t r0, uint32_t r1)
2551 int cc = 0;
2552 int sel1, sel2;
2554 if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
2555 ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
2556 /* valid function code, invalid reserved bits */
2557 program_interrupt(env, PGM_SPECIFICATION, 2);
2560 sel1 = r0 & STSI_R0_SEL1_MASK;
2561 sel2 = r1 & STSI_R1_SEL2_MASK;
2563 /* XXX: spec exception if sysib is not 4k-aligned */
2565 switch (r0 & STSI_LEVEL_MASK) {
2566 case STSI_LEVEL_1:
2567 if ((sel1 == 1) && (sel2 == 1)) {
2568 /* Basic Machine Configuration */
2569 struct sysib_111 sysib;
2571 memset(&sysib, 0, sizeof(sysib));
2572 ebcdic_put(sysib.manuf, "QEMU ", 16);
2573 /* same as machine type number in STORE CPU ID */
2574 ebcdic_put(sysib.type, "QEMU", 4);
2575 /* same as model number in STORE CPU ID */
2576 ebcdic_put(sysib.model, "QEMU ", 16);
2577 ebcdic_put(sysib.sequence, "QEMU ", 16);
2578 ebcdic_put(sysib.plant, "QEMU", 4);
2579 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2580 } else if ((sel1 == 2) && (sel2 == 1)) {
2581 /* Basic Machine CPU */
2582 struct sysib_121 sysib;
2584 memset(&sysib, 0, sizeof(sysib));
2585 /* XXX make different for different CPUs? */
2586 ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
2587 ebcdic_put(sysib.plant, "QEMU", 4);
2588 stw_p(&sysib.cpu_addr, env->cpu_num);
2589 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2590 } else if ((sel1 == 2) && (sel2 == 2)) {
2591 /* Basic Machine CPUs */
2592 struct sysib_122 sysib;
2594 memset(&sysib, 0, sizeof(sysib));
2595 stl_p(&sysib.capability, 0x443afc29);
2596 /* XXX change when SMP comes */
2597 stw_p(&sysib.total_cpus, 1);
2598 stw_p(&sysib.active_cpus, 1);
2599 stw_p(&sysib.standby_cpus, 0);
2600 stw_p(&sysib.reserved_cpus, 0);
2601 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2602 } else {
2603 cc = 3;
2605 break;
2606 case STSI_LEVEL_2:
2608 if ((sel1 == 2) && (sel2 == 1)) {
2609 /* LPAR CPU */
2610 struct sysib_221 sysib;
2612 memset(&sysib, 0, sizeof(sysib));
2613 /* XXX make different for different CPUs? */
2614 ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
2615 ebcdic_put(sysib.plant, "QEMU", 4);
2616 stw_p(&sysib.cpu_addr, env->cpu_num);
2617 stw_p(&sysib.cpu_id, 0);
2618 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2619 } else if ((sel1 == 2) && (sel2 == 2)) {
2620 /* LPAR CPUs */
2621 struct sysib_222 sysib;
2623 memset(&sysib, 0, sizeof(sysib));
2624 stw_p(&sysib.lpar_num, 0);
2625 sysib.lcpuc = 0;
2626 /* XXX change when SMP comes */
2627 stw_p(&sysib.total_cpus, 1);
2628 stw_p(&sysib.conf_cpus, 1);
2629 stw_p(&sysib.standby_cpus, 0);
2630 stw_p(&sysib.reserved_cpus, 0);
2631 ebcdic_put(sysib.name, "QEMU ", 8);
2632 stl_p(&sysib.caf, 1000);
2633 stw_p(&sysib.dedicated_cpus, 0);
2634 stw_p(&sysib.shared_cpus, 0);
2635 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2636 } else {
2637 cc = 3;
2639 break;
2641 case STSI_LEVEL_3:
2643 if ((sel1 == 2) && (sel2 == 2)) {
2644 /* VM CPUs */
2645 struct sysib_322 sysib;
2647 memset(&sysib, 0, sizeof(sysib));
2648 sysib.count = 1;
2649 /* XXX change when SMP comes */
2650 stw_p(&sysib.vm[0].total_cpus, 1);
2651 stw_p(&sysib.vm[0].conf_cpus, 1);
2652 stw_p(&sysib.vm[0].standby_cpus, 0);
2653 stw_p(&sysib.vm[0].reserved_cpus, 0);
2654 ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
2655 stl_p(&sysib.vm[0].caf, 1000);
2656 ebcdic_put(sysib.vm[0].cpi, "KVM/Linux ", 16);
2657 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2658 } else {
2659 cc = 3;
2661 break;
2663 case STSI_LEVEL_CURRENT:
2664 env->regs[0] = STSI_LEVEL_3;
2665 break;
2666 default:
2667 cc = 3;
2668 break;
2671 return cc;
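/* LOAD CONTROL (64-bit): load control registers r1 through r3 (wrapping
   after 15) from consecutive doublewords at a2, then flush the TLB since
   translation controls may have changed */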
2674 void HELPER(lctlg)(uint32_t r1, uint64_t a2, uint32_t r3)
2676 int i;
2677 uint64_t src = a2;
2679 for (i = r1;; i = (i + 1) % 16) {
2680 env->cregs[i] = ldq(src);
2681 HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
2682 i, src, env->cregs[i]);
2683 src += sizeof(uint64_t);
2685 if (i == r3) {
2686 break;
2690 tlb_flush(env, 1);
2693 void HELPER(lctl)(uint32_t r1, uint64_t a2, uint32_t r3)
2695 int i;
2696 uint64_t src = a2;
2698 for (i = r1;; i = (i + 1) % 16) {
2699 env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | ldl(src);
2700 src += sizeof(uint32_t);
2702 if (i == r3) {
2703 break;
2707 tlb_flush(env, 1);
2710 void HELPER(stctg)(uint32_t r1, uint64_t a2, uint32_t r3)
2712 int i;
2713 uint64_t dest = a2;
2715 for (i = r1;; i = (i + 1) % 16) {
2716 stq(dest, env->cregs[i]);
2717 dest += sizeof(uint64_t);
2719 if (i == r3) {
2720 break;
2725 void HELPER(stctl)(uint32_t r1, uint64_t a2, uint32_t r3)
2727 int i;
2728 uint64_t dest = a2;
2730 for (i = r1;; i = (i + 1) % 16) {
2731 stl(dest, env->cregs[i]);
2732 dest += sizeof(uint32_t);
2734 if (i == r3) {
2735 break;
2740 uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
2742 /* XXX implement */
2744 return 0;
2747 /* insert storage key extended */
2748 uint64_t HELPER(iske)(uint64_t r2)
2750 uint64_t addr = get_address(0, 0, r2);
if (addr >= ram_size) {
2753 return 0;
2756 /* XXX maybe use qemu's internal keys? */
2757 return env->storage_keys[addr / TARGET_PAGE_SIZE];
2760 /* set storage key extended */
2761 void HELPER(sske)(uint32_t r1, uint64_t r2)
2763 uint64_t addr = get_address(0, 0, r2);
if (addr >= ram_size) {
2766 return;
2769 env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
2772 /* reset reference bit extended */
2773 uint32_t HELPER(rrbe)(uint32_t r1, uint64_t r2)
2775 if (r2 > ram_size) {
2776 return 0;
2779 /* XXX implement */
2780 #if 0
2781 env->storage_keys[r2 / TARGET_PAGE_SIZE] &= ~SK_REFERENCED;
2782 #endif
/*
 * cc
 *
 * 0 Reference bit zero; change bit zero
 * 1 Reference bit zero; change bit one
 * 2 Reference bit one; change bit zero
 * 3 Reference bit one; change bit one
 */
2792 return 0;
2795 /* compare and swap and purge */
2796 uint32_t HELPER(csp)(uint32_t r1, uint32_t r2)
2798 uint32_t cc;
2799 uint32_t o1 = env->regs[r1];
2800 uint64_t a2 = get_address_31fix(r2) & ~3ULL;
2801 uint32_t o2 = ldl(a2);
2803 if (o1 == o2) {
2804 stl(a2, env->regs[(r1 + 1) & 15]);
2805 if (env->regs[r2] & 0x3) {
2806 /* flush TLB / ALB */
2807 tlb_flush(env, 1);
2809 cc = 0;
2810 } else {
2811 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
2812 cc = 1;
2815 return cc;
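/* copy at most 256 bytes between two address spaces, translating the
   destination with mode1 and the source with mode2; returns cc 3 if the
   request had to be truncated. Used by MVCS and MVCP below. */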
2818 static uint32_t mvc_asc(int64_t l, uint64_t a1, uint64_t mode1, uint64_t a2,
2819 uint64_t mode2)
2821 target_ulong src, dest;
2822 int flags, cc = 0, i;
2824 if (!l) {
2825 return 0;
2826 } else if (l > 256) {
2827 /* max 256 */
2828 l = 256;
2829 cc = 3;
2832 if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
2833 cpu_loop_exit(env);
2835 dest |= a1 & ~TARGET_PAGE_MASK;
2837 if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
2838 cpu_loop_exit(env);
2840 src |= a2 & ~TARGET_PAGE_MASK;
2842 /* XXX replace w/ memcpy */
2843 for (i = 0; i < l; i++) {
2844 /* XXX be more clever */
2845 if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
2846 (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
2847 mvc_asc(l - i, a1 + i, mode1, a2 + i, mode2);
2848 break;
2850 stb_phys(dest + i, ldub_phys(src + i));
2853 return cc;
2856 uint32_t HELPER(mvcs)(uint64_t l, uint64_t a1, uint64_t a2)
2858 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2859 __FUNCTION__, l, a1, a2);
2861 return mvc_asc(l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
2864 uint32_t HELPER(mvcp)(uint64_t l, uint64_t a1, uint64_t a2)
2866 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2867 __FUNCTION__, l, a1, a2);
2869 return mvc_asc(l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
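/* SIGNAL PROCESSOR: only the SET ARCHITECTURE and SENSE orders are
   handled here; any other order is logged and returns cc 3 */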
2872 uint32_t HELPER(sigp)(uint64_t order_code, uint32_t r1, uint64_t cpu_addr)
2874 int cc = 0;
2876 HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
2877 __FUNCTION__, order_code, r1, cpu_addr);
2879 /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
2880 as parameter (input). Status (output) is always R1. */
2882 switch (order_code) {
2883 case SIGP_SET_ARCH:
2884 /* switch arch */
2885 break;
2886 case SIGP_SENSE:
2887 /* enumerate CPU status */
2888 if (cpu_addr) {
2889 /* XXX implement when SMP comes */
2890 return 3;
2892 env->regs[r1] &= 0xffffffff00000000ULL;
2893 cc = 1;
2894 break;
2895 default:
2896 /* unknown sigp */
2897 fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
2898 cc = 3;
2901 return cc;
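/* SET ADDRESS SPACE CONTROL FAST: switch the address-space-control bits
   in the PSW to primary, secondary or home mode; unsupported modes raise
   a specification exception */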
2904 void HELPER(sacf)(uint64_t a1)
2906 HELPER_LOG("%s: %16" PRIx64 "\n", __FUNCTION__, a1);
2908 switch (a1 & 0xf00) {
2909 case 0x000:
2910 env->psw.mask &= ~PSW_MASK_ASC;
2911 env->psw.mask |= PSW_ASC_PRIMARY;
2912 break;
2913 case 0x100:
2914 env->psw.mask &= ~PSW_MASK_ASC;
2915 env->psw.mask |= PSW_ASC_SECONDARY;
2916 break;
2917 case 0x300:
2918 env->psw.mask &= ~PSW_MASK_ASC;
2919 env->psw.mask |= PSW_ASC_HOME;
2920 break;
2921 default:
2922 qemu_log("unknown sacf mode: %" PRIx64 "\n", a1);
2923 program_interrupt(env, PGM_SPECIFICATION, 2);
2924 break;
2928 /* invalidate pte */
2929 void HELPER(ipte)(uint64_t pte_addr, uint64_t vaddr)
2931 uint64_t page = vaddr & TARGET_PAGE_MASK;
2932 uint64_t pte = 0;
2934 /* XXX broadcast to other CPUs */
2936 /* XXX Linux is nice enough to give us the exact pte address.
2937 According to spec we'd have to find it out ourselves */
2938 /* XXX Linux is fine with overwriting the pte, the spec requires
2939 us to only set the invalid bit */
2940 stq_phys(pte_addr, pte | _PAGE_INVALID);
2942 /* XXX we exploit the fact that Linux passes the exact virtual
2943 address here - it's not obliged to! */
2944 tlb_flush_page(env, page);
2947 /* flush local tlb */
2948 void HELPER(ptlb)(void)
2950 tlb_flush(env, 1);
2953 /* store using real address */
2954 void HELPER(stura)(uint64_t addr, uint32_t v1)
stl_phys(get_address(0, 0, addr), v1); /* STURA stores a 32-bit word, not a halfword */
2959 /* load real address */
2960 uint32_t HELPER(lra)(uint64_t addr, uint32_t r1)
2962 uint32_t cc = 0;
2963 int old_exc = env->exception_index;
2964 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
2965 uint64_t ret;
2966 int flags;
2968 /* XXX incomplete - has more corner cases */
2969 if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
2970 program_interrupt(env, PGM_SPECIAL_OP, 2);
2973 env->exception_index = old_exc;
2974 if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
2975 cc = 3;
2977 if (env->exception_index == EXCP_PGM) {
2978 ret = env->int_pgm_code | 0x80000000;
2979 } else {
2980 ret |= addr & ~TARGET_PAGE_MASK;
2982 env->exception_index = old_exc;
2984 if (!(env->psw.mask & PSW_MASK_64)) {
2985 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (ret & 0xffffffffULL);
2986 } else {
2987 env->regs[r1] = ret;
2990 return cc;
2993 #endif