/* qemu.git: target-s390x/op_helper.c */
/*
 *  S/390 helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "exec.h"
#include "host-utils.h"
#include "helpers.h"
#include <string.h>
#include "kvm.h"
#include "qemu-timer.h"
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        cpu_loop_exit(env);
    }
    env = saved_env;
}

#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* raise an exception */
void HELPER(exception)(uint32_t excp)
{
    HELPER_LOG("%s: exception %d\n", __FUNCTION__, excp);
    env->exception_index = excp;
    cpu_loop_exit(env);
}

#ifndef CONFIG_USER_ONLY
static void mvc_fast_memset(CPUState *env, uint32_t l, uint64_t dest,
                            uint8_t byte)
{
    target_phys_addr_t dest_phys;
    target_phys_addr_t len = l;
    void *dest_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        stb(dest, byte);
        cpu_abort(env, "should never reach here");
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);

    memset(dest_p, byte, len);

    cpu_physical_memory_unmap(dest_p, 1, len, len);
}

static void mvc_fast_memmove(CPUState *env, uint32_t l, uint64_t dest,
                             uint64_t src)
{
    target_phys_addr_t dest_phys;
    target_phys_addr_t src_phys;
    target_phys_addr_t len = l;
    void *dest_p;
    void *src_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        stb(dest, 0);
        cpu_abort(env, "should never reach here");
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
        ldub(src);
        cpu_abort(env, "should never reach here");
    }
    src_phys |= src & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
    src_p = cpu_physical_memory_map(src_phys, &len, 0);

    memmove(dest_p, src_p, len);

    cpu_physical_memory_unmap(dest_p, 1, len, len);
    cpu_physical_memory_unmap(src_p, 0, len, len);
}
#endif

/* and on array */
uint32_t HELPER(nc)(uint32_t l, uint64_t dest, uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __FUNCTION__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = ldub(dest + i) & ldub(src + i);
        if (x) {
            cc = 1;
        }
        stb(dest + i, x);
    }
    return cc;
}

/* xor on array */
uint32_t HELPER(xc)(uint32_t l, uint64_t dest, uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __FUNCTION__, l, dest, src);

#ifndef CONFIG_USER_ONLY
    /* xor with itself is the same as memset(0) */
    if ((l > 32) && (src == dest) &&
        (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
        mvc_fast_memset(env, l + 1, dest, 0);
        return 0;
    }
#else
    if (src == dest) {
        memset(g2h(dest), 0, l + 1);
        return 0;
    }
#endif

    for (i = 0; i <= l; i++) {
        x = ldub(dest + i) ^ ldub(src + i);
        if (x) {
            cc = 1;
        }
        stb(dest + i, x);
    }
    return cc;
}

/* or on array */
uint32_t HELPER(oc)(uint32_t l, uint64_t dest, uint64_t src)
{
    int i;
    unsigned char x;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __FUNCTION__, l, dest, src);
    for (i = 0; i <= l; i++) {
        x = ldub(dest + i) | ldub(src + i);
        if (x) {
            cc = 1;
        }
        stb(dest + i, x);
    }
    return cc;
}

/* memmove */
void HELPER(mvc)(uint32_t l, uint64_t dest, uint64_t src)
{
    int i = 0;
    int x = 0;
    uint32_t l_64 = (l + 1) / 8;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __FUNCTION__, l, dest, src);

#ifndef CONFIG_USER_ONLY
    if ((l > 32) &&
        (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
        (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
        if (dest == (src + 1)) {
            mvc_fast_memset(env, l + 1, dest, ldub(src));
            return;
        } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
            mvc_fast_memmove(env, l + 1, dest, src);
            return;
        }
    }
#else
    if (dest == (src + 1)) {
        memset(g2h(dest), ldub(src), l + 1);
        return;
    } else {
        memmove(g2h(dest), g2h(src), l + 1);
        return;
    }
#endif

    /* handle the parts that fit into 8-byte loads/stores */
    if (dest != (src + 1)) {
        for (i = 0; i < l_64; i++) {
            stq(dest + x, ldq(src + x));
            x += 8;
        }
    }

    /* slow version crossing pages with byte accesses */
    for (i = x; i <= l; i++) {
        stb(dest + i, ldub(src + i));
    }
}

/* compare unsigned byte arrays */
uint32_t HELPER(clc)(uint32_t l, uint64_t s1, uint64_t s2)
{
    int i;
    unsigned char x, y;
    uint32_t cc;
    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __FUNCTION__, l, s1, s2);
    for (i = 0; i <= l; i++) {
        x = ldub(s1 + i);
        y = ldub(s2 + i);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            goto done;
        } else if (x > y) {
            cc = 2;
            goto done;
        }
    }
    cc = 0;
done:
    HELPER_LOG("\n");
    return cc;
}

/* compare logical under mask */
uint32_t HELPER(clm)(uint32_t r1, uint32_t mask, uint64_t addr)
{
    uint8_t r, d;
    uint32_t cc;
    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __FUNCTION__, r1,
               mask, addr);
    cc = 0;
    while (mask) {
        if (mask & 8) {
            d = ldub(addr);
            r = (r1 & 0xff000000UL) >> 24;
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }
    HELPER_LOG("\n");
    return cc;
}

/* store character under mask */
void HELPER(stcm)(uint32_t r1, uint32_t mask, uint64_t addr)
{
    uint8_t r;
    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__, r1, mask,
               addr);
    while (mask) {
        if (mask & 8) {
            r = (r1 & 0xff000000UL) >> 24;
            stb(addr, r);
            HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask, r, addr);
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }
    HELPER_LOG("\n");
}

/* 64/64 -> 128 unsigned multiplication */
void HELPER(mlg)(uint32_t r1, uint64_t v2)
{
#if HOST_LONG_BITS == 64 && defined(__GNUC__)
    /* assuming 64-bit hosts have __uint128_t */
    __uint128_t res = (__uint128_t)env->regs[r1 + 1];
    res *= (__uint128_t)v2;
    env->regs[r1] = (uint64_t)(res >> 64);
    env->regs[r1 + 1] = (uint64_t)res;
#else
    mulu64(&env->regs[r1 + 1], &env->regs[r1], env->regs[r1 + 1], v2);
#endif
}

/* 128 -> 64/64 unsigned division */
void HELPER(dlg)(uint32_t r1, uint64_t v2)
{
    uint64_t divisor = v2;

    if (!env->regs[r1]) {
        /* 64 -> 64/64 case */
        env->regs[r1] = env->regs[r1+1] % divisor;
        env->regs[r1+1] = env->regs[r1+1] / divisor;
        return;
    } else {

#if HOST_LONG_BITS == 64 && defined(__GNUC__)
        /* assuming 64-bit hosts have __uint128_t */
        __uint128_t dividend = (((__uint128_t)env->regs[r1]) << 64) |
                               (env->regs[r1+1]);
        __uint128_t quotient = dividend / divisor;
        env->regs[r1+1] = quotient;
        __uint128_t remainder = dividend % divisor;
        env->regs[r1] = remainder;
#else
        /* 32-bit hosts would need special wrapper functionality - just abort if
           we encounter such a case; it's very unlikely anyways. */
        cpu_abort(env, "128 -> 64/64 division not implemented\n");
#endif
    }
}

static inline uint64_t get_address(int x2, int b2, int d2)
{
    uint64_t r = d2;

    if (x2) {
        r += env->regs[x2];
    }

    if (b2) {
        r += env->regs[b2];
    }

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        r &= 0x7fffffff;
    }

    return r;
}

static inline uint64_t get_address_31fix(int reg)
{
    uint64_t r = env->regs[reg];

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        r &= 0x7fffffff;
    }

    return r;
}

/* search string (c is byte to search, r2 is string, r1 end of string) */
uint32_t HELPER(srst)(uint32_t c, uint32_t r1, uint32_t r2)
{
    uint64_t i;
    uint32_t cc = 2;
    uint64_t str = get_address_31fix(r2);
    uint64_t end = get_address_31fix(r1);

    HELPER_LOG("%s: c %d *r1 0x%" PRIx64 " *r2 0x%" PRIx64 "\n", __FUNCTION__,
               c, env->regs[r1], env->regs[r2]);

    for (i = str; i != end; i++) {
        if (ldub(i) == c) {
            env->regs[r1] = i;
            cc = 1;
            break;
        }
    }

    return cc;
}

/* unsigned string compare (c is string terminator) */
uint32_t HELPER(clst)(uint32_t c, uint32_t r1, uint32_t r2)
{
    uint64_t s1 = get_address_31fix(r1);
    uint64_t s2 = get_address_31fix(r2);
    uint8_t v1, v2;
    uint32_t cc;
    c = c & 0xff;
#ifdef CONFIG_USER_ONLY
    if (!c) {
        HELPER_LOG("%s: comparing '%s' and '%s'\n",
                   __FUNCTION__, (char*)g2h(s1), (char*)g2h(s2));
    }
#endif
    for (;;) {
        v1 = ldub(s1);
        v2 = ldub(s2);
        if ((v1 == c || v2 == c) || (v1 != v2)) {
            break;
        }
        s1++;
        s2++;
    }

    if (v1 == v2) {
        cc = 0;
    } else {
        cc = (v1 < v2) ? 1 : 2;
        /* FIXME: 31-bit mode! */
        env->regs[r1] = s1;
        env->regs[r2] = s2;
    }
    return cc;
}

/* move page */
void HELPER(mvpg)(uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* XXX missing r0 handling */
#ifdef CONFIG_USER_ONLY
    int i;

    for (i = 0; i < TARGET_PAGE_SIZE; i++) {
        stb(r1 + i, ldub(r2 + i));
    }
#else
    mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
#endif
}

/* string copy (c is string terminator) */
void HELPER(mvst)(uint32_t c, uint32_t r1, uint32_t r2)
{
    uint64_t dest = get_address_31fix(r1);
    uint64_t src = get_address_31fix(r2);
    uint8_t v;
    c = c & 0xff;
#ifdef CONFIG_USER_ONLY
    if (!c) {
        HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__, (char*)g2h(src),
                   dest);
    }
#endif
    for (;;) {
        v = ldub(src);
        stb(dest, v);
        if (v == c) {
            break;
        }
        src++;
        dest++;
    }
    env->regs[r1] = dest; /* FIXME: 31-bit mode! */
}

/* compare and swap 64-bit */
uint32_t HELPER(csg)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    /* FIXME: locking? */
    uint32_t cc;
    uint64_t v2 = ldq(a2);
    if (env->regs[r1] == v2) {
        cc = 0;
        stq(a2, env->regs[r3]);
    } else {
        cc = 1;
        env->regs[r1] = v2;
    }
    return cc;
}

/* compare double and swap 64-bit */
uint32_t HELPER(cdsg)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    /* FIXME: locking? */
    uint32_t cc;
    uint64_t v2_hi = ldq(a2);
    uint64_t v2_lo = ldq(a2 + 8);
    uint64_t v1_hi = env->regs[r1];
    uint64_t v1_lo = env->regs[r1 + 1];

    if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) {
        cc = 0;
        stq(a2, env->regs[r3]);
        stq(a2 + 8, env->regs[r3 + 1]);
    } else {
        cc = 1;
        env->regs[r1] = v2_hi;
        env->regs[r1 + 1] = v2_lo;
    }

    return cc;
}

/* compare and swap 32-bit */
uint32_t HELPER(cs)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    /* FIXME: locking? */
    uint32_t cc;
    HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__, r1, a2, r3);
    uint32_t v2 = ldl(a2);
    if (((uint32_t)env->regs[r1]) == v2) {
        cc = 0;
        stl(a2, (uint32_t)env->regs[r3]);
    } else {
        cc = 1;
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | v2;
    }
    return cc;
}

static uint32_t helper_icm(uint32_t r1, uint64_t address, uint32_t mask)
{
    int pos = 24; /* top of the lower half of r1 */
    uint64_t rmask = 0xff000000ULL;
    uint8_t val = 0;
    int ccd = 0;
    uint32_t cc = 0;

    while (mask) {
        if (mask & 8) {
            env->regs[r1] &= ~rmask;
            val = ldub(address);
            if ((val & 0x80) && !ccd) {
                cc = 1;
            }
            ccd = 1;
            if (val && cc == 0) {
                cc = 2;
            }
            env->regs[r1] |= (uint64_t)val << pos;
            address++;
        }
        mask = (mask << 1) & 0xf;
        pos -= 8;
        rmask >>= 8;
    }

    return cc;
}
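
/* ICM condition code: 0 if the mask is zero or all inserted bits are zero,
   1 if the leftmost inserted byte has its high bit set, 2 otherwise. */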

/* execute instruction
   this instruction executes an insn modified with the contents of r1
   it does not change the executed instruction in memory
   it does not change the program counter
   in other words: tricky...
   currently implemented by interpreting the cases it is most commonly used in
 */
uint32_t HELPER(ex)(uint32_t cc, uint64_t v1, uint64_t addr, uint64_t ret)
{
    uint16_t insn = lduw_code(addr);
    HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__, v1, addr,
               insn);
    if ((insn & 0xf0ff) == 0xd000) {
        uint32_t l, insn2, b1, b2, d1, d2;
        l = v1 & 0xff;
        insn2 = ldl_code(addr + 2);
        b1 = (insn2 >> 28) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d1 = (insn2 >> 16) & 0xfff;
        d2 = insn2 & 0xfff;
        switch (insn & 0xf00) {
        case 0x200:
            helper_mvc(l, get_address(0, b1, d1), get_address(0, b2, d2));
            break;
        case 0x500:
            cc = helper_clc(l, get_address(0, b1, d1), get_address(0, b2, d2));
            break;
        case 0x700:
            cc = helper_xc(l, get_address(0, b1, d1), get_address(0, b2, d2));
            break;
        default:
            goto abort;
            break;
        }
    } else if ((insn & 0xff00) == 0x0a00) {
        /* supervisor call */
        HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__, (insn|v1) & 0xff);
        env->psw.addr = ret - 4;
        env->int_svc_code = (insn|v1) & 0xff;
        env->int_svc_ilc = 4;
        helper_exception(EXCP_SVC);
    } else if ((insn & 0xff00) == 0xbf00) {
        uint32_t insn2, r1, r3, b2, d2;
        insn2 = ldl_code(addr + 2);
        r1 = (insn2 >> 20) & 0xf;
        r3 = (insn2 >> 16) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d2 = insn2 & 0xfff;
        cc = helper_icm(r1, get_address(0, b2, d2), r3);
    } else {
    abort:
        cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
                  insn);
    }
    return cc;
}
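
/* Note: architecturally, EXECUTE ORs bits 56-63 of r1 into bits 8-15 of the
   target instruction (no modification if the r1 field is 0).  In the
   interpreted cases above that byte is taken directly as the MVC/CLC/XC
   length ("v1 & 0xff") and OR-ed into the SVC number ("(insn | v1) & 0xff"). */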

/* absolute value 32-bit */
uint32_t HELPER(abs_i32)(int32_t val)
{
    if (val < 0) {
        return -val;
    } else {
        return val;
    }
}

/* negative absolute value 32-bit */
int32_t HELPER(nabs_i32)(int32_t val)
{
    if (val < 0) {
        return val;
    } else {
        return -val;
    }
}

/* absolute value 64-bit */
uint64_t HELPER(abs_i64)(int64_t val)
{
    HELPER_LOG("%s: val 0x%" PRIx64 "\n", __FUNCTION__, val);

    if (val < 0) {
        return -val;
    } else {
        return val;
    }
}

/* negative absolute value 64-bit */
int64_t HELPER(nabs_i64)(int64_t val)
{
    if (val < 0) {
        return val;
    } else {
        return -val;
    }
}

/* add with carry 32-bit unsigned */
uint32_t HELPER(addc_u32)(uint32_t cc, uint32_t v1, uint32_t v2)
{
    uint32_t res;

    res = v1 + v2;
    if (cc & 2) {
        res++;
    }

    return res;
}
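
/* For ADD LOGICAL WITH CARRY the previous operation's carry is encoded in the
   condition code (cc 2 or 3 means carry out), hence the "cc & 2" test above. */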

/* store character under mask high operates on the upper half of r1 */
void HELPER(stcmh)(uint32_t r1, uint64_t address, uint32_t mask)
{
    int pos = 56; /* top of the upper half of r1 */

    while (mask) {
        if (mask & 8) {
            stb(address, (env->regs[r1] >> pos) & 0xff);
            address++;
        }
        mask = (mask << 1) & 0xf;
        pos -= 8;
    }
}

/* insert character under mask high; same as icm, but operates on the
   upper half of r1 */
uint32_t HELPER(icmh)(uint32_t r1, uint64_t address, uint32_t mask)
{
    int pos = 56; /* top of the upper half of r1 */
    uint64_t rmask = 0xff00000000000000ULL;
    uint8_t val = 0;
    int ccd = 0;
    uint32_t cc = 0;

    while (mask) {
        if (mask & 8) {
            env->regs[r1] &= ~rmask;
            val = ldub(address);
            if ((val & 0x80) && !ccd) {
                cc = 1;
            }
            ccd = 1;
            if (val && cc == 0) {
                cc = 2;
            }
            env->regs[r1] |= (uint64_t)val << pos;
            address++;
        }
        mask = (mask << 1) & 0xf;
        pos -= 8;
        rmask >>= 8;
    }

    return cc;
}

/* insert psw mask and condition code into r1 */
void HELPER(ipm)(uint32_t cc, uint32_t r1)
{
    uint64_t r = env->regs[r1];

    r &= 0xffffffff00ffffffULL;
    r |= (cc << 28) | ((env->psw.mask >> 40) & 0xf);
    env->regs[r1] = r;
    HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__,
               cc, env->psw.mask, r);
}

/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = ldl(a2);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;

    for (i = r1;; i = (i + 1) % 16) {
        stl(a2, env->aregs[i]);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* move long */
uint32_t HELPER(mvcl)(uint32_t r1, uint32_t r2)
{
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address_31fix(r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address_31fix(r2);
    uint8_t pad = src >> 24;
    uint8_t v;
    uint32_t cc;

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = ldub(src);
        stb(dest, v);
    }

    for (; destlen; dest++, destlen--) {
        stb(dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r2 + 1] -= src - env->regs[r2];
    env->regs[r1] = dest;
    env->regs[r2] = src;

    return cc;
}

/* move long extended: another memcopy insn with more bells and whistles */
uint32_t HELPER(mvcle)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = env->regs[r1];
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = env->regs[r3];
    uint8_t pad = a2 & 0xff;
    uint8_t v;
    uint32_t cc;

    if (!(env->psw.mask & PSW_MASK_64)) {
        destlen = (uint32_t)destlen;
        srclen = (uint32_t)srclen;
        dest &= 0x7fffffff;
        src &= 0x7fffffff;
    }

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = ldub(src);
        stb(dest, v);
    }

    for (; destlen; dest++, destlen--) {
        stb(dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    /* FIXME: 31-bit mode! */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}

/* compare logical long extended: memcompare insn with padding */
uint32_t HELPER(clcle)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = get_address_31fix(r1);
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = get_address_31fix(r3);
    uint8_t pad = a2 & 0xff;
    uint8_t v1 = 0, v2 = 0;
    uint32_t cc = 0;

    if (!(destlen || srclen)) {
        return cc;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
        v1 = srclen ? ldub(src) : pad;
        v2 = destlen ? ldub(dest) : pad;
        if (v1 != v2) {
            cc = (v1 < v2) ? 1 : 2;
            break;
        }
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}

/* subtract unsigned v2 from v1 with borrow */
uint32_t HELPER(slb)(uint32_t cc, uint32_t r1, uint32_t v2)
{
    uint32_t v1 = env->regs[r1];
    uint32_t res = v1 + (~v2) + (cc >> 1);

    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | res;
    if (cc & 2) {
        /* borrow */
        return v1 ? 1 : 0;
    } else {
        return v1 ? 3 : 2;
    }
}

/* subtract unsigned v2 from v1 with borrow */
uint32_t HELPER(slbg)(uint32_t cc, uint32_t r1, uint64_t v1, uint64_t v2)
{
    uint64_t res = v1 + (~v2) + (cc >> 1);

    env->regs[r1] = res;
    if (cc & 2) {
        /* borrow */
        return v1 ? 1 : 0;
    } else {
        return v1 ? 3 : 2;
    }
}

static inline int float_comp_to_cc(int float_compare)
{
    switch (float_compare) {
    case float_relation_equal:
        return 0;
    case float_relation_less:
        return 1;
    case float_relation_greater:
        return 2;
    case float_relation_unordered:
        return 3;
    default:
        cpu_abort(env, "unknown return value for float compare\n");
    }
}

/* condition codes for binary FP ops */
static uint32_t set_cc_f32(float32 v1, float32 v2)
{
    return float_comp_to_cc(float32_compare_quiet(v1, v2, &env->fpu_status));
}

static uint32_t set_cc_f64(float64 v1, float64 v2)
{
    return float_comp_to_cc(float64_compare_quiet(v1, v2, &env->fpu_status));
}

/* condition codes for unary FP ops */
static uint32_t set_cc_nz_f32(float32 v)
{
    if (float32_is_any_nan(v)) {
        return 3;
    } else if (float32_is_zero(v)) {
        return 0;
    } else if (float32_is_neg(v)) {
        return 1;
    } else {
        return 2;
    }
}

static uint32_t set_cc_nz_f64(float64 v)
{
    if (float64_is_any_nan(v)) {
        return 3;
    } else if (float64_is_zero(v)) {
        return 0;
    } else if (float64_is_neg(v)) {
        return 1;
    } else {
        return 2;
    }
}

static uint32_t set_cc_nz_f128(float128 v)
{
    if (float128_is_any_nan(v)) {
        return 3;
    } else if (float128_is_zero(v)) {
        return 0;
    } else if (float128_is_neg(v)) {
        return 1;
    } else {
        return 2;
    }
}

/* convert 32-bit int to 64-bit float */
void HELPER(cdfbr)(uint32_t f1, int32_t v2)
{
    HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__, v2, f1);
    env->fregs[f1].d = int32_to_float64(v2, &env->fpu_status);
}

/* convert 32-bit int to 128-bit float */
void HELPER(cxfbr)(uint32_t f1, int32_t v2)
{
    CPU_QuadU v1;
    v1.q = int32_to_float128(v2, &env->fpu_status);
    env->fregs[f1].ll = v1.ll.upper;
    env->fregs[f1 + 2].ll = v1.ll.lower;
}

/* convert 64-bit int to 32-bit float */
void HELPER(cegbr)(uint32_t f1, int64_t v2)
{
    HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
    env->fregs[f1].l.upper = int64_to_float32(v2, &env->fpu_status);
}

/* convert 64-bit int to 64-bit float */
void HELPER(cdgbr)(uint32_t f1, int64_t v2)
{
    HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
    env->fregs[f1].d = int64_to_float64(v2, &env->fpu_status);
}

/* convert 64-bit int to 128-bit float */
void HELPER(cxgbr)(uint32_t f1, int64_t v2)
{
    CPU_QuadU x1;
    x1.q = int64_to_float128(v2, &env->fpu_status);
    HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__, v2,
               x1.ll.upper, x1.ll.lower);
    env->fregs[f1].ll = x1.ll.upper;
    env->fregs[f1 + 2].ll = x1.ll.lower;
}

/* convert 32-bit int to 32-bit float */
void HELPER(cefbr)(uint32_t f1, int32_t v2)
{
    env->fregs[f1].l.upper = int32_to_float32(v2, &env->fpu_status);
    HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__, v2,
               env->fregs[f1].l.upper, f1);
}

/* 32-bit FP addition RR */
uint32_t HELPER(aebr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
                                         env->fregs[f2].l.upper,
                                         &env->fpu_status);
    HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
               env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);

    return set_cc_nz_f32(env->fregs[f1].l.upper);
}

/* 64-bit FP addition RR */
uint32_t HELPER(adbr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].d = float64_add(env->fregs[f1].d, env->fregs[f2].d,
                                   &env->fpu_status);
    HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__,
               env->fregs[f2].d, env->fregs[f1].d, f1);

    return set_cc_nz_f64(env->fregs[f1].d);
}

/* 32-bit FP subtraction RR */
uint32_t HELPER(sebr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].l.upper = float32_sub(env->fregs[f1].l.upper,
                                         env->fregs[f2].l.upper,
                                         &env->fpu_status);
    HELPER_LOG("%s: subtracting 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
               env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);

    return set_cc_nz_f32(env->fregs[f1].l.upper);
}

/* 64-bit FP subtraction RR */
uint32_t HELPER(sdbr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].d = float64_sub(env->fregs[f1].d, env->fregs[f2].d,
                                   &env->fpu_status);
    HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
               __FUNCTION__, env->fregs[f2].d, env->fregs[f1].d, f1);

    return set_cc_nz_f64(env->fregs[f1].d);
}

/* 32-bit FP division RR */
void HELPER(debr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].l.upper = float32_div(env->fregs[f1].l.upper,
                                         env->fregs[f2].l.upper,
                                         &env->fpu_status);
}

/* 128-bit FP division RR */
void HELPER(dxbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU v1;
    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;
    CPU_QuadU v2;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    CPU_QuadU res;
    res.q = float128_div(v1.q, v2.q, &env->fpu_status);
    env->fregs[f1].ll = res.ll.upper;
    env->fregs[f1 + 2].ll = res.ll.lower;
}

/* 64-bit FP multiplication RR */
void HELPER(mdbr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].d = float64_mul(env->fregs[f1].d, env->fregs[f2].d,
                                   &env->fpu_status);
}

/* 128-bit FP multiplication RR */
void HELPER(mxbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU v1;
    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;
    CPU_QuadU v2;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    CPU_QuadU res;
    res.q = float128_mul(v1.q, v2.q, &env->fpu_status);
    env->fregs[f1].ll = res.ll.upper;
    env->fregs[f1 + 2].ll = res.ll.lower;
}

/* convert 32-bit float to 64-bit float */
void HELPER(ldebr)(uint32_t r1, uint32_t r2)
{
    env->fregs[r1].d = float32_to_float64(env->fregs[r2].l.upper,
                                          &env->fpu_status);
}

/* convert 128-bit float to 64-bit float */
void HELPER(ldxbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU x2;
    x2.ll.upper = env->fregs[f2].ll;
    x2.ll.lower = env->fregs[f2 + 2].ll;
    env->fregs[f1].d = float128_to_float64(x2.q, &env->fpu_status);
    HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__, env->fregs[f1].d);
}

/* convert 64-bit float to 128-bit float */
void HELPER(lxdbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU res;
    res.q = float64_to_float128(env->fregs[f2].d, &env->fpu_status);
    env->fregs[f1].ll = res.ll.upper;
    env->fregs[f1 + 2].ll = res.ll.lower;
}

/* convert 64-bit float to 32-bit float */
void HELPER(ledbr)(uint32_t f1, uint32_t f2)
{
    float64 d2 = env->fregs[f2].d;
    env->fregs[f1].l.upper = float64_to_float32(d2, &env->fpu_status);
}

/* convert 128-bit float to 32-bit float */
void HELPER(lexbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU x2;
    x2.ll.upper = env->fregs[f2].ll;
    x2.ll.lower = env->fregs[f2 + 2].ll;
    env->fregs[f1].l.upper = float128_to_float32(x2.q, &env->fpu_status);
    HELPER_LOG("%s: to 0x%d\n", __FUNCTION__, env->fregs[f1].l.upper);
}

/* absolute value of 32-bit float */
uint32_t HELPER(lpebr)(uint32_t f1, uint32_t f2)
{
    float32 v1;
    float32 v2 = env->fregs[f2].l.upper;
    v1 = float32_abs(v2);
    env->fregs[f1].l.upper = v1;
    return set_cc_nz_f32(v1);
}

/* absolute value of 64-bit float */
uint32_t HELPER(lpdbr)(uint32_t f1, uint32_t f2)
{
    float64 v1;
    float64 v2 = env->fregs[f2].d;
    v1 = float64_abs(v2);
    env->fregs[f1].d = v1;
    return set_cc_nz_f64(v1);
}

/* absolute value of 128-bit float */
uint32_t HELPER(lpxbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU v1;
    CPU_QuadU v2;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    v1.q = float128_abs(v2.q);
    env->fregs[f1].ll = v1.ll.upper;
    env->fregs[f1 + 2].ll = v1.ll.lower;
    return set_cc_nz_f128(v1.q);
}

/* load and test 64-bit float */
uint32_t HELPER(ltdbr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].d = env->fregs[f2].d;
    return set_cc_nz_f64(env->fregs[f1].d);
}

/* load and test 32-bit float */
uint32_t HELPER(ltebr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].l.upper = env->fregs[f2].l.upper;
    return set_cc_nz_f32(env->fregs[f1].l.upper);
}

/* load and test 128-bit float */
uint32_t HELPER(ltxbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU x;
    x.ll.upper = env->fregs[f2].ll;
    x.ll.lower = env->fregs[f2 + 2].ll;
    env->fregs[f1].ll = x.ll.upper;
    env->fregs[f1 + 2].ll = x.ll.lower;
    return set_cc_nz_f128(x.q);
}

/* load complement of 32-bit float */
uint32_t HELPER(lcebr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].l.upper = float32_chs(env->fregs[f2].l.upper);

    return set_cc_nz_f32(env->fregs[f1].l.upper);
}

/* load complement of 64-bit float */
uint32_t HELPER(lcdbr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].d = float64_chs(env->fregs[f2].d);

    return set_cc_nz_f64(env->fregs[f1].d);
}

/* load complement of 128-bit float */
uint32_t HELPER(lcxbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU x1, x2;
    x2.ll.upper = env->fregs[f2].ll;
    x2.ll.lower = env->fregs[f2 + 2].ll;
    x1.q = float128_chs(x2.q);
    env->fregs[f1].ll = x1.ll.upper;
    env->fregs[f1 + 2].ll = x1.ll.lower;
    return set_cc_nz_f128(x1.q);
}

/* 32-bit FP addition RM */
void HELPER(aeb)(uint32_t f1, uint32_t val)
{
    float32 v1 = env->fregs[f1].l.upper;
    CPU_FloatU v2;
    v2.l = val;
    HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__,
               v1, f1, v2.f);
    env->fregs[f1].l.upper = float32_add(v1, v2.f, &env->fpu_status);
}

/* 32-bit FP division RM */
void HELPER(deb)(uint32_t f1, uint32_t val)
{
    float32 v1 = env->fregs[f1].l.upper;
    CPU_FloatU v2;
    v2.l = val;
    HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__,
               v1, f1, v2.f);
    env->fregs[f1].l.upper = float32_div(v1, v2.f, &env->fpu_status);
}

/* 32-bit FP multiplication RM */
void HELPER(meeb)(uint32_t f1, uint32_t val)
{
    float32 v1 = env->fregs[f1].l.upper;
    CPU_FloatU v2;
    v2.l = val;
    HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__,
               v1, f1, v2.f);
    env->fregs[f1].l.upper = float32_mul(v1, v2.f, &env->fpu_status);
}

/* 32-bit FP compare RR */
uint32_t HELPER(cebr)(uint32_t f1, uint32_t f2)
{
    float32 v1 = env->fregs[f1].l.upper;
    float32 v2 = env->fregs[f2].l.upper;
    HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__,
               v1, f1, v2);
    return set_cc_f32(v1, v2);
}

/* 64-bit FP compare RR */
uint32_t HELPER(cdbr)(uint32_t f1, uint32_t f2)
{
    float64 v1 = env->fregs[f1].d;
    float64 v2 = env->fregs[f2].d;
    HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__,
               v1, f1, v2);
    return set_cc_f64(v1, v2);
}

/* 128-bit FP compare RR */
uint32_t HELPER(cxbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU v1;
    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;
    CPU_QuadU v2;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;

    return float_comp_to_cc(float128_compare_quiet(v1.q, v2.q,
                                                   &env->fpu_status));
}

/* 64-bit FP compare RM */
uint32_t HELPER(cdb)(uint32_t f1, uint64_t a2)
{
    float64 v1 = env->fregs[f1].d;
    CPU_DoubleU v2;
    v2.ll = ldq(a2);
    HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__, v1,
               f1, v2.d);
    return set_cc_f64(v1, v2.d);
}

/* 64-bit FP addition RM */
uint32_t HELPER(adb)(uint32_t f1, uint64_t a2)
{
    float64 v1 = env->fregs[f1].d;
    CPU_DoubleU v2;
    v2.ll = ldq(a2);
    HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__,
               v1, f1, v2.d);
    env->fregs[f1].d = v1 = float64_add(v1, v2.d, &env->fpu_status);
    return set_cc_nz_f64(v1);
}

/* 32-bit FP subtraction RM */
void HELPER(seb)(uint32_t f1, uint32_t val)
{
    float32 v1 = env->fregs[f1].l.upper;
    CPU_FloatU v2;
    v2.l = val;
    env->fregs[f1].l.upper = float32_sub(v1, v2.f, &env->fpu_status);
}

/* 64-bit FP subtraction RM */
uint32_t HELPER(sdb)(uint32_t f1, uint64_t a2)
{
    float64 v1 = env->fregs[f1].d;
    CPU_DoubleU v2;
    v2.ll = ldq(a2);
    env->fregs[f1].d = v1 = float64_sub(v1, v2.d, &env->fpu_status);
    return set_cc_nz_f64(v1);
}

/* 64-bit FP multiplication RM */
void HELPER(mdb)(uint32_t f1, uint64_t a2)
{
    float64 v1 = env->fregs[f1].d;
    CPU_DoubleU v2;
    v2.ll = ldq(a2);
    HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__,
               v1, f1, v2.d);
    env->fregs[f1].d = float64_mul(v1, v2.d, &env->fpu_status);
}

/* 64-bit FP division RM */
void HELPER(ddb)(uint32_t f1, uint64_t a2)
{
    float64 v1 = env->fregs[f1].d;
    CPU_DoubleU v2;
    v2.ll = ldq(a2);
    HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__,
               v1, f1, v2.d);
    env->fregs[f1].d = float64_div(v1, v2.d, &env->fpu_status);
}

static void set_round_mode(int m3)
{
    switch (m3) {
    case 0:
        /* current mode */
        break;
    case 1:
        /* biased round to nearest */
    case 4:
        /* round to nearest */
        set_float_rounding_mode(float_round_nearest_even, &env->fpu_status);
        break;
    case 5:
        /* round to zero */
        set_float_rounding_mode(float_round_to_zero, &env->fpu_status);
        break;
    case 6:
        /* round to +inf */
        set_float_rounding_mode(float_round_up, &env->fpu_status);
        break;
    case 7:
        /* round to -inf */
        set_float_rounding_mode(float_round_down, &env->fpu_status);
        break;
    }
}

/* convert 32-bit float to 64-bit int */
uint32_t HELPER(cgebr)(uint32_t r1, uint32_t f2, uint32_t m3)
{
    float32 v2 = env->fregs[f2].l.upper;
    set_round_mode(m3);
    env->regs[r1] = float32_to_int64(v2, &env->fpu_status);
    return set_cc_nz_f32(v2);
}

/* convert 64-bit float to 64-bit int */
uint32_t HELPER(cgdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
{
    float64 v2 = env->fregs[f2].d;
    set_round_mode(m3);
    env->regs[r1] = float64_to_int64(v2, &env->fpu_status);
    return set_cc_nz_f64(v2);
}

/* convert 128-bit float to 64-bit int */
uint32_t HELPER(cgxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
{
    CPU_QuadU v2;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    set_round_mode(m3);
    env->regs[r1] = float128_to_int64(v2.q, &env->fpu_status);
    if (float128_is_any_nan(v2.q)) {
        return 3;
    } else if (float128_is_zero(v2.q)) {
        return 0;
    } else if (float128_is_neg(v2.q)) {
        return 1;
    } else {
        return 2;
    }
}

/* convert 32-bit float to 32-bit int */
uint32_t HELPER(cfebr)(uint32_t r1, uint32_t f2, uint32_t m3)
{
    float32 v2 = env->fregs[f2].l.upper;
    set_round_mode(m3);
    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
                    float32_to_int32(v2, &env->fpu_status);
    return set_cc_nz_f32(v2);
}

/* convert 64-bit float to 32-bit int */
uint32_t HELPER(cfdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
{
    float64 v2 = env->fregs[f2].d;
    set_round_mode(m3);
    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
                    float64_to_int32(v2, &env->fpu_status);
    return set_cc_nz_f64(v2);
}

/* convert 128-bit float to 32-bit int */
uint32_t HELPER(cfxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
{
    CPU_QuadU v2;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
                    float128_to_int32(v2.q, &env->fpu_status);
    return set_cc_nz_f128(v2.q);
}

/* load 32-bit FP zero */
void HELPER(lzer)(uint32_t f1)
{
    env->fregs[f1].l.upper = float32_zero;
}

/* load 64-bit FP zero */
void HELPER(lzdr)(uint32_t f1)
{
    env->fregs[f1].d = float64_zero;
}

/* load 128-bit FP zero */
void HELPER(lzxr)(uint32_t f1)
{
    CPU_QuadU x;
    x.q = float64_to_float128(float64_zero, &env->fpu_status);
    env->fregs[f1].ll = x.ll.upper;
    env->fregs[f1 + 2].ll = x.ll.lower;
}

/* 128-bit FP subtraction RR */
uint32_t HELPER(sxbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU v1;
    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;
    CPU_QuadU v2;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    CPU_QuadU res;
    res.q = float128_sub(v1.q, v2.q, &env->fpu_status);
    env->fregs[f1].ll = res.ll.upper;
    env->fregs[f1 + 2].ll = res.ll.lower;
    return set_cc_nz_f128(res.q);
}

/* 128-bit FP addition RR */
uint32_t HELPER(axbr)(uint32_t f1, uint32_t f2)
{
    CPU_QuadU v1;
    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;
    CPU_QuadU v2;
    v2.ll.upper = env->fregs[f2].ll;
    v2.ll.lower = env->fregs[f2 + 2].ll;
    CPU_QuadU res;
    res.q = float128_add(v1.q, v2.q, &env->fpu_status);
    env->fregs[f1].ll = res.ll.upper;
    env->fregs[f1 + 2].ll = res.ll.lower;
    return set_cc_nz_f128(res.q);
}

/* 32-bit FP multiplication RR */
void HELPER(meebr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].l.upper = float32_mul(env->fregs[f1].l.upper,
                                         env->fregs[f2].l.upper,
                                         &env->fpu_status);
}

/* 64-bit FP division RR */
void HELPER(ddbr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].d = float64_div(env->fregs[f1].d, env->fregs[f2].d,
                                   &env->fpu_status);
}

/* 64-bit FP multiply and add RM */
void HELPER(madb)(uint32_t f1, uint64_t a2, uint32_t f3)
{
    HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__, f1, a2, f3);
    CPU_DoubleU v2;
    v2.ll = ldq(a2);
    env->fregs[f1].d = float64_add(env->fregs[f1].d,
                                   float64_mul(v2.d, env->fregs[f3].d,
                                               &env->fpu_status),
                                   &env->fpu_status);
}

/* 64-bit FP multiply and add RR */
void HELPER(madbr)(uint32_t f1, uint32_t f3, uint32_t f2)
{
    HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
    env->fregs[f1].d = float64_add(float64_mul(env->fregs[f2].d,
                                               env->fregs[f3].d,
                                               &env->fpu_status),
                                   env->fregs[f1].d, &env->fpu_status);
}

/* 64-bit FP multiply and subtract RR */
void HELPER(msdbr)(uint32_t f1, uint32_t f3, uint32_t f2)
{
    HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
    env->fregs[f1].d = float64_sub(float64_mul(env->fregs[f2].d,
                                               env->fregs[f3].d,
                                               &env->fpu_status),
                                   env->fregs[f1].d, &env->fpu_status);
}

/* 32-bit FP multiply and add RR */
void HELPER(maebr)(uint32_t f1, uint32_t f3, uint32_t f2)
{
    env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
                                         float32_mul(env->fregs[f2].l.upper,
                                                     env->fregs[f3].l.upper,
                                                     &env->fpu_status),
                                         &env->fpu_status);
}

/* convert 64-bit float to 128-bit float */
void HELPER(lxdb)(uint32_t f1, uint64_t a2)
{
    CPU_DoubleU v2;
    v2.ll = ldq(a2);
    CPU_QuadU v1;
    v1.q = float64_to_float128(v2.d, &env->fpu_status);
    env->fregs[f1].ll = v1.ll.upper;
    env->fregs[f1 + 2].ll = v1.ll.lower;
}

/* test data class 32-bit */
uint32_t HELPER(tceb)(uint32_t f1, uint64_t m2)
{
    float32 v1 = env->fregs[f1].l.upper;
    int neg = float32_is_neg(v1);
    uint32_t cc = 0;

    HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, (long)v1, m2, neg);
    if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
        (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
        (float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
        (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
        cc = 1;
    } else if (m2 & (1 << (9-neg))) {
        /* assume normalized number */
        cc = 1;
    }

    /* FIXME: denormalized? */
    return cc;
}

/* test data class 64-bit */
uint32_t HELPER(tcdb)(uint32_t f1, uint64_t m2)
{
    float64 v1 = env->fregs[f1].d;
    int neg = float64_is_neg(v1);
    uint32_t cc = 0;

    HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, v1, m2, neg);
    if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
        (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
        (float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
        (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
        cc = 1;
    } else if (m2 & (1 << (9-neg))) {
        /* assume normalized number */
        cc = 1;
    }
    /* FIXME: denormalized? */
    return cc;
}

/* test data class 128-bit */
uint32_t HELPER(tcxb)(uint32_t f1, uint64_t m2)
{
    CPU_QuadU v1;
    uint32_t cc = 0;
    v1.ll.upper = env->fregs[f1].ll;
    v1.ll.lower = env->fregs[f1 + 2].ll;

    int neg = float128_is_neg(v1.q);
    if ((float128_is_zero(v1.q) && (m2 & (1 << (11-neg)))) ||
        (float128_is_infinity(v1.q) && (m2 & (1 << (5-neg)))) ||
        (float128_is_any_nan(v1.q) && (m2 & (1 << (3-neg)))) ||
        (float128_is_signaling_nan(v1.q) && (m2 & (1 << (1-neg))))) {
        cc = 1;
    } else if (m2 & (1 << (9-neg))) {
        /* assume normalized number */
        cc = 1;
    }
    /* FIXME: denormalized? */
    return cc;
}

/* find leftmost one */
uint32_t HELPER(flogr)(uint32_t r1, uint64_t v2)
{
    uint64_t res = 0;
    uint64_t ov2 = v2;

    while (!(v2 & 0x8000000000000000ULL) && v2) {
        v2 <<= 1;
        res++;
    }

    if (!v2) {
        env->regs[r1] = 64;
        env->regs[r1 + 1] = 0;
        return 0;
    } else {
        env->regs[r1] = res;
        env->regs[r1 + 1] = ov2 & ~(0x8000000000000000ULL >> res);
        return 2;
    }
}
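
/* Example: for v2 = 0x0000400000000000 (17 leading zero bits) FLOGR sets
   r1 = 17 and r1+1 = 0 (the found bit cleared) and returns cc 2; for v2 = 0
   it sets r1 = 64, r1+1 = 0 and returns cc 0. */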

/* square root 64-bit RR */
void HELPER(sqdbr)(uint32_t f1, uint32_t f2)
{
    env->fregs[f1].d = float64_sqrt(env->fregs[f2].d, &env->fpu_status);
}

/* checksum */
void HELPER(cksm)(uint32_t r1, uint32_t r2)
{
    uint64_t src = get_address_31fix(r2);
    uint64_t src_len = env->regs[(r2 + 1) & 15];
    uint64_t cksm = (uint32_t)env->regs[r1];

    while (src_len >= 4) {
        cksm += ldl(src);

        /* move to next word */
        src_len -= 4;
        src += 4;
    }

    switch (src_len) {
    case 0:
        break;
    case 1:
        cksm += ldub(src) << 24;
        break;
    case 2:
        cksm += lduw(src) << 16;
        break;
    case 3:
        cksm += lduw(src) << 16;
        cksm += ldub(src + 2) << 8;
        break;
    }

    /* indicate we've processed everything */
    env->regs[r2] = src + src_len;
    env->regs[(r2 + 1) & 15] = 0;

    /* store result */
    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
        ((uint32_t)cksm + (cksm >> 32));
}

static inline uint32_t cc_calc_ltgt_32(CPUState *env, int32_t src,
                                       int32_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

static inline uint32_t cc_calc_ltgt0_32(CPUState *env, int32_t dst)
{
    return cc_calc_ltgt_32(env, dst, 0);
}

static inline uint32_t cc_calc_ltgt_64(CPUState *env, int64_t src,
                                       int64_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

static inline uint32_t cc_calc_ltgt0_64(CPUState *env, int64_t dst)
{
    return cc_calc_ltgt_64(env, dst, 0);
}

static inline uint32_t cc_calc_ltugtu_32(CPUState *env, uint32_t src,
                                         uint32_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

static inline uint32_t cc_calc_ltugtu_64(CPUState *env, uint64_t src,
                                         uint64_t dst)
{
    if (src == dst) {
        return 0;
    } else if (src < dst) {
        return 1;
    } else {
        return 2;
    }
}

static inline uint32_t cc_calc_tm_32(CPUState *env, uint32_t val, uint32_t mask)
{
    HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__, val, mask);
    uint16_t r = val & mask;
    if (r == 0 || mask == 0) {
        return 0;
    } else if (r == mask) {
        return 3;
    } else {
        return 1;
    }
}

/* set condition code for test under mask */
static inline uint32_t cc_calc_tm_64(CPUState *env, uint64_t val, uint32_t mask)
{
    uint16_t r = val & mask;
    HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__, val, mask, r);
    if (r == 0 || mask == 0) {
        return 0;
    } else if (r == mask) {
        return 3;
    } else {
        while (!(mask & 0x8000)) {
            mask <<= 1;
            val <<= 1;
        }
        if (val & 0x8000) {
            return 2;
        } else {
            return 1;
        }
    }
}

static inline uint32_t cc_calc_nz(CPUState *env, uint64_t dst)
{
    return !!dst;
}

static inline uint32_t cc_calc_add_64(CPUState *env, int64_t a1, int64_t a2,
                                      int64_t ar)
{
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}

static inline uint32_t cc_calc_addu_64(CPUState *env, uint64_t a1, uint64_t a2,
                                       uint64_t ar)
{
    if (ar == 0) {
        if (a1) {
            return 2;
        } else {
            return 0;
        }
    } else {
        if (ar < a1 || ar < a2) {
            return 3;
        } else {
            return 1;
        }
    }
}

static inline uint32_t cc_calc_sub_64(CPUState *env, int64_t a1, int64_t a2,
                                      int64_t ar)
{
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}

static inline uint32_t cc_calc_subu_64(CPUState *env, uint64_t a1, uint64_t a2,
                                       uint64_t ar)
{
    if (ar == 0) {
        return 2;
    } else {
        if (a2 > a1) {
            return 1;
        } else {
            return 3;
        }
    }
}

static inline uint32_t cc_calc_abs_64(CPUState *env, int64_t dst)
{
    if ((uint64_t)dst == 0x8000000000000000ULL) {
        return 3;
    } else if (dst) {
        return 1;
    } else {
        return 0;
    }
}

static inline uint32_t cc_calc_nabs_64(CPUState *env, int64_t dst)
{
    return !!dst;
}

static inline uint32_t cc_calc_comp_64(CPUState *env, int64_t dst)
{
    if ((uint64_t)dst == 0x8000000000000000ULL) {
        return 3;
    } else if (dst < 0) {
        return 1;
    } else if (dst > 0) {
        return 2;
    } else {
        return 0;
    }
}

static inline uint32_t cc_calc_add_32(CPUState *env, int32_t a1, int32_t a2,
                                      int32_t ar)
{
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}

static inline uint32_t cc_calc_addu_32(CPUState *env, uint32_t a1, uint32_t a2,
                                       uint32_t ar)
{
    if (ar == 0) {
        if (a1) {
            return 2;
        } else {
            return 0;
        }
    } else {
        if (ar < a1 || ar < a2) {
            return 3;
        } else {
            return 1;
        }
    }
}

static inline uint32_t cc_calc_sub_32(CPUState *env, int32_t a1, int32_t a2,
                                      int32_t ar)
{
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}

static inline uint32_t cc_calc_subu_32(CPUState *env, uint32_t a1, uint32_t a2,
                                       uint32_t ar)
{
    if (ar == 0) {
        return 2;
    } else {
        if (a2 > a1) {
            return 1;
        } else {
            return 3;
        }
    }
}

static inline uint32_t cc_calc_abs_32(CPUState *env, int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        return 3;
    } else if (dst) {
        return 1;
    } else {
        return 0;
    }
}

static inline uint32_t cc_calc_nabs_32(CPUState *env, int32_t dst)
{
    return !!dst;
}

static inline uint32_t cc_calc_comp_32(CPUState *env, int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        return 3;
    } else if (dst < 0) {
        return 1;
    } else if (dst > 0) {
        return 2;
    } else {
        return 0;
    }
}

/* calculate condition code for insert character under mask insn */
static inline uint32_t cc_calc_icm_32(CPUState *env, uint32_t mask, uint32_t val)
{
    HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__, mask, val);
    uint32_t cc;

    if (mask == 0xf) {
        if (!val) {
            return 0;
        } else if (val & 0x80000000) {
            return 1;
        } else {
            return 2;
        }
    }

    if (!val || !mask) {
        cc = 0;
    } else {
        while (mask != 1) {
            mask >>= 1;
            val >>= 8;
        }
        if (val & 0x80) {
            cc = 1;
        } else {
            cc = 2;
        }
    }
    return cc;
}

static inline uint32_t cc_calc_slag(CPUState *env, uint64_t src, uint64_t shift)
{
    uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
    uint64_t match, r;

    /* check if the sign bit stays the same */
    if (src & (1ULL << 63)) {
        match = mask;
    } else {
        match = 0;
    }

    if ((src & mask) != match) {
        /* overflow */
        return 3;
    }

    r = ((src << shift) & ((1ULL << 63) - 1)) | (src & (1ULL << 63));

    if ((int64_t)r == 0) {
        return 0;
    } else if ((int64_t)r < 0) {
        return 1;
    }

    return 2;
}
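
/* SLAG overflow check, by example: shifting left by "shift" may only discard
   bits equal to the sign bit, so src = 0x4000000000000000 with shift = 2
   loses a 1-bit under a 0 sign bit and yields cc 3. */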

static inline uint32_t do_calc_cc(CPUState *env, uint32_t cc_op, uint64_t src,
                                  uint64_t dst, uint64_t vr)
{
    uint32_t r = 0;

    switch (cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* cc_op value _is_ cc */
        r = cc_op;
        break;
    case CC_OP_LTGT0_32:
        r = cc_calc_ltgt0_32(env, dst);
        break;
    case CC_OP_LTGT0_64:
        r = cc_calc_ltgt0_64(env, dst);
        break;
    case CC_OP_LTGT_32:
        r = cc_calc_ltgt_32(env, src, dst);
        break;
    case CC_OP_LTGT_64:
        r = cc_calc_ltgt_64(env, src, dst);
        break;
    case CC_OP_LTUGTU_32:
        r = cc_calc_ltugtu_32(env, src, dst);
        break;
    case CC_OP_LTUGTU_64:
        r = cc_calc_ltugtu_64(env, src, dst);
        break;
    case CC_OP_TM_32:
        r = cc_calc_tm_32(env, src, dst);
        break;
    case CC_OP_TM_64:
        r = cc_calc_tm_64(env, src, dst);
        break;
    case CC_OP_NZ:
        r = cc_calc_nz(env, dst);
        break;
    case CC_OP_ADD_64:
        r = cc_calc_add_64(env, src, dst, vr);
        break;
    case CC_OP_ADDU_64:
        r = cc_calc_addu_64(env, src, dst, vr);
        break;
    case CC_OP_SUB_64:
        r = cc_calc_sub_64(env, src, dst, vr);
        break;
    case CC_OP_SUBU_64:
        r = cc_calc_subu_64(env, src, dst, vr);
        break;
    case CC_OP_ABS_64:
        r = cc_calc_abs_64(env, dst);
        break;
    case CC_OP_NABS_64:
        r = cc_calc_nabs_64(env, dst);
        break;
    case CC_OP_COMP_64:
        r = cc_calc_comp_64(env, dst);
        break;

    case CC_OP_ADD_32:
        r = cc_calc_add_32(env, src, dst, vr);
        break;
    case CC_OP_ADDU_32:
        r = cc_calc_addu_32(env, src, dst, vr);
        break;
    case CC_OP_SUB_32:
        r = cc_calc_sub_32(env, src, dst, vr);
        break;
    case CC_OP_SUBU_32:
        r = cc_calc_subu_32(env, src, dst, vr);
        break;
    case CC_OP_ABS_32:
        r = cc_calc_abs_64(env, dst);
        break;
    case CC_OP_NABS_32:
        r = cc_calc_nabs_64(env, dst);
        break;
    case CC_OP_COMP_32:
        r = cc_calc_comp_32(env, dst);
        break;

    case CC_OP_ICM:
        r = cc_calc_icm_32(env, src, dst);
        break;
    case CC_OP_SLAG:
        r = cc_calc_slag(env, src, dst);
        break;

    case CC_OP_LTGT_F32:
        r = set_cc_f32(src, dst);
        break;
    case CC_OP_LTGT_F64:
        r = set_cc_f64(src, dst);
        break;
    case CC_OP_NZ_F32:
        r = set_cc_nz_f32(dst);
        break;
    case CC_OP_NZ_F64:
        r = set_cc_nz_f64(dst);
        break;

    default:
        cpu_abort(env, "Unknown CC operation: %s\n", cc_name(cc_op));
    }

    HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__,
               cc_name(cc_op), src, dst, vr, r);
    return r;
}

uint32_t calc_cc(CPUState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
                 uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}

uint32_t HELPER(calc_cc)(uint32_t cc_op, uint64_t src, uint64_t dst,
                         uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}
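
/* calc_cc is the slow path of the lazy condition-code scheme: the translator
   normally just records cc_op plus the operand/result values, and the numeric
   cc 0..3 is only materialized here on demand, e.g. before a conditional
   branch or when the PSW has to be read out. */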

uint64_t HELPER(cvd)(int32_t bin)
{
    /* positive 0 */
    uint64_t dec = 0x0c;
    int shift = 4;

    if (bin < 0) {
        bin = -bin;
        dec = 0x0d;
    }

    for (shift = 4; (shift < 64) && bin; shift += 4) {
        int current_number = bin % 10;

        dec |= (uint64_t)current_number << shift;
        bin /= 10;
    }

    return dec;
}
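
/* CONVERT TO DECIMAL packs one digit per nibble with the sign code in the
   lowest nibble, e.g. cvd(-1234) yields 0x000000000001234d (0xd = negative,
   0xc = positive). */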

void HELPER(unpk)(uint32_t len, uint64_t dest, uint64_t src)
{
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = ldub(src);
    stb(dest, (b << 4) | (b >> 4));
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        if (len_src > 0) {
            cur_byte = ldub(src);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        stb(dest, cur_byte);
    }
}

void HELPER(tr)(uint32_t len, uint64_t array, uint64_t trans)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = ldub(array + i);
        uint8_t new_byte = ldub(trans + byte);
        stb(array + i, new_byte);
    }
}

#ifndef CONFIG_USER_ONLY

void HELPER(load_psw)(uint64_t mask, uint64_t addr)
{
    load_psw(env, mask, addr);
    cpu_loop_exit(env);
}

static void program_interrupt(CPUState *env, uint32_t code, int ilc)
{
    qemu_log("program interrupt at %#" PRIx64 "\n", env->psw.addr);

    if (kvm_enabled()) {
#ifdef CONFIG_KVM
        kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code);
#endif
    } else {
        env->int_pgm_code = code;
        env->int_pgm_ilc = ilc;
        env->exception_index = EXCP_PGM;
        cpu_loop_exit(env);
    }
}

static void ext_interrupt(CPUState *env, int type, uint32_t param,
                          uint64_t param64)
{
    cpu_inject_ext(env, type, param, param64);
}

int sclp_service_call(CPUState *env, uint32_t sccb, uint64_t code)
{
    int r = 0;
    int shift = 0;

#ifdef DEBUG_HELPER
    printf("sclp(0x%x, 0x%" PRIx64 ")\n", sccb, code);
#endif

    if (sccb & ~0x7ffffff8ul) {
        fprintf(stderr, "KVM: invalid sccb address 0x%x\n", sccb);
        r = -1;
        goto out;
    }

    switch (code) {
    case SCLP_CMDW_READ_SCP_INFO:
    case SCLP_CMDW_READ_SCP_INFO_FORCED:
        while ((ram_size >> (20 + shift)) > 65535) {
            shift++;
        }
        stw_phys(sccb + SCP_MEM_CODE, ram_size >> (20 + shift));
        stb_phys(sccb + SCP_INCREMENT, 1 << shift);
        stw_phys(sccb + SCP_RESPONSE_CODE, 0x10);

        if (kvm_enabled()) {
#ifdef CONFIG_KVM
            kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE,
                                        sccb & ~3, 0, 1);
#endif
        } else {
            env->psw.addr += 4;
            ext_interrupt(env, EXT_SERVICE, sccb & ~3, 0);
        }
        break;
    default:
#ifdef DEBUG_HELPER
        printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64 "x\n", sccb, code);
#endif
        r = -1;
        break;
    }

out:
    return r;
}

/* SCLP service call */
uint32_t HELPER(servc)(uint32_t r1, uint64_t r2)
{
    if (sclp_service_call(env, r1, r2)) {
        return 3;
    }

    return 0;
}
2411 /* DIAG */
2412 uint64_t HELPER(diag)(uint32_t num, uint64_t mem, uint64_t code)
2414 uint64_t r;
2416 switch (num) {
2417 case 0x500:
2418 /* KVM hypercall */
2419 r = s390_virtio_hypercall(env, mem, code);
2420 break;
2421 case 0x44:
2422 /* yield */
2423 r = 0;
2424 break;
2425 case 0x308:
2426 /* ipl */
2427 r = 0;
2428 break;
2429 default:
2430 r = -1;
2431 break;
2434 if (r) {
2435 program_interrupt(env, PGM_OPERATION, ILC_LATER_INC);
2438 return r;
2441 /* Store CPU ID */
2442 void HELPER(stidp)(uint64_t a1)
2444 stq(a1, env->cpu_num);
2447 /* Set Prefix */
2448 void HELPER(spx)(uint64_t a1)
2450 uint32_t prefix;
2452 prefix = ldl(a1);
2453 env->psa = prefix & 0xfffff000;
2454 qemu_log("prefix: %#x\n", prefix);
2455 tlb_flush_page(env, 0);
2456 tlb_flush_page(env, TARGET_PAGE_SIZE);
2459 /* Set Clock */
2460 uint32_t HELPER(sck)(uint64_t a1)
2462 /* XXX not implemented - is it necessary? */
2464 return 0;
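/* Current guest TOD clock: the stored TOD offset plus the time elapsed since
   the TOD base, converted from nanoseconds to TOD-clock units by time2tod(). */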
2467 static inline uint64_t clock_value(CPUState *env)
2469 uint64_t time;
2471 time = env->tod_offset +
2472 time2tod(qemu_get_clock_ns(vm_clock) - env->tod_basetime);
2474 return time;
2477 /* Store Clock */
2478 uint32_t HELPER(stck)(uint64_t a1)
2480 stq(a1, clock_value(env));
2482 return 0;
2485 /* Store Clock Extended */
2486 uint32_t HELPER(stcke)(uint64_t a1)
2488 stb(a1, 0);
2489 /* bytes 1-8: basically the same value as stck */
2490 stq(a1 + 1, clock_value(env) | env->cpu_num);
2491 /* bytes 9-13: more fine grained than stck, not modelled */
2492 stl(a1 + 9, 0);
2493 stb(a1 + 13, 0);
2494 /* XXX programmable field (bytes 14-15 of the 16-byte operand) */
2495 stw(a1 + 14, 0);
2497 return 0;
2500 /* Set Clock Comparator */
2501 void HELPER(sckc)(uint64_t a1)
2503 uint64_t time = ldq(a1);
2505 if (time == -1ULL) {
2506 return;
2509 /* difference between now and then */
2510 time -= clock_value(env);
2511 /* nanoseconds */
2512 time = (time * 125) >> 9;
2514 qemu_mod_timer(env->tod_timer, qemu_get_clock_ns(vm_clock) + time);
2517 /* Store Clock Comparator */
2518 void HELPER(stckc)(uint64_t a1)
2520 /* XXX implement */
2521 stq(a1, 0);
2524 /* Set CPU Timer */
2525 void HELPER(spt)(uint64_t a1)
2527 uint64_t time = ldq(a1);
2529 if (time == -1ULL) {
2530 return;
2533 /* nanoseconds */
2534 time = (time * 125) >> 9;
2536 qemu_mod_timer(env->cpu_timer, qemu_get_clock_ns(vm_clock) + time);
2539 /* Store CPU Timer */
2540 void HELPER(stpt)(uint64_t a1)
2542 /* XXX implement */
2543 stq(a1, 0);
2546 /* Store System Information */
2547 uint32_t HELPER(stsi)(uint64_t a0, uint32_t r0, uint32_t r1)
2549 int cc = 0;
2550 int sel1, sel2;
2552 if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
2553 ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
2554 /* valid function code, invalid reserved bits */
2555 program_interrupt(env, PGM_SPECIFICATION, 2);
2558 sel1 = r0 & STSI_R0_SEL1_MASK;
2559 sel2 = r1 & STSI_R1_SEL2_MASK;
2561 /* XXX: spec exception if sysib is not 4k-aligned */
2563 switch (r0 & STSI_LEVEL_MASK) {
2564 case STSI_LEVEL_1:
2565 if ((sel1 == 1) && (sel2 == 1)) {
2566 /* Basic Machine Configuration */
2567 struct sysib_111 sysib;
2569 memset(&sysib, 0, sizeof(sysib));
2570 ebcdic_put(sysib.manuf, "QEMU ", 16);
2571 /* same as machine type number in STORE CPU ID */
2572 ebcdic_put(sysib.type, "QEMU", 4);
2573 /* same as model number in STORE CPU ID */
2574 ebcdic_put(sysib.model, "QEMU ", 16);
2575 ebcdic_put(sysib.sequence, "QEMU ", 16);
2576 ebcdic_put(sysib.plant, "QEMU", 4);
2577 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2578 } else if ((sel1 == 2) && (sel2 == 1)) {
2579 /* Basic Machine CPU */
2580 struct sysib_121 sysib;
2582 memset(&sysib, 0, sizeof(sysib));
2583 /* XXX make different for different CPUs? */
2584 ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
2585 ebcdic_put(sysib.plant, "QEMU", 4);
2586 stw_p(&sysib.cpu_addr, env->cpu_num);
2587 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2588 } else if ((sel1 == 2) && (sel2 == 2)) {
2589 /* Basic Machine CPUs */
2590 struct sysib_122 sysib;
2592 memset(&sysib, 0, sizeof(sysib));
2593 stl_p(&sysib.capability, 0x443afc29);
2594 /* XXX change when SMP comes */
2595 stw_p(&sysib.total_cpus, 1);
2596 stw_p(&sysib.active_cpus, 1);
2597 stw_p(&sysib.standby_cpus, 0);
2598 stw_p(&sysib.reserved_cpus, 0);
2599 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2600 } else {
2601 cc = 3;
2603 break;
2604 case STSI_LEVEL_2:
2606 if ((sel1 == 2) && (sel2 == 1)) {
2607 /* LPAR CPU */
2608 struct sysib_221 sysib;
2610 memset(&sysib, 0, sizeof(sysib));
2611 /* XXX make different for different CPUs? */
2612 ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
2613 ebcdic_put(sysib.plant, "QEMU", 4);
2614 stw_p(&sysib.cpu_addr, env->cpu_num);
2615 stw_p(&sysib.cpu_id, 0);
2616 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2617 } else if ((sel1 == 2) && (sel2 == 2)) {
2618 /* LPAR CPUs */
2619 struct sysib_222 sysib;
2621 memset(&sysib, 0, sizeof(sysib));
2622 stw_p(&sysib.lpar_num, 0);
2623 sysib.lcpuc = 0;
2624 /* XXX change when SMP comes */
2625 stw_p(&sysib.total_cpus, 1);
2626 stw_p(&sysib.conf_cpus, 1);
2627 stw_p(&sysib.standby_cpus, 0);
2628 stw_p(&sysib.reserved_cpus, 0);
2629 ebcdic_put(sysib.name, "QEMU ", 8);
2630 stl_p(&sysib.caf, 1000);
2631 stw_p(&sysib.dedicated_cpus, 0);
2632 stw_p(&sysib.shared_cpus, 0);
2633 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2634 } else {
2635 cc = 3;
2637 break;
2639 case STSI_LEVEL_3:
2641 if ((sel1 == 2) && (sel2 == 2)) {
2642 /* VM CPUs */
2643 struct sysib_322 sysib;
2645 memset(&sysib, 0, sizeof(sysib));
2646 sysib.count = 1;
2647 /* XXX change when SMP comes */
2648 stw_p(&sysib.vm[0].total_cpus, 1);
2649 stw_p(&sysib.vm[0].conf_cpus, 1);
2650 stw_p(&sysib.vm[0].standby_cpus, 0);
2651 stw_p(&sysib.vm[0].reserved_cpus, 0);
2652 ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
2653 stl_p(&sysib.vm[0].caf, 1000);
2654 ebcdic_put(sysib.vm[0].cpi, "KVM/Linux ", 16);
2655 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2656 } else {
2657 cc = 3;
2659 break;
2661 case STSI_LEVEL_CURRENT:
2662 env->regs[0] = STSI_LEVEL_3;
2663 break;
2664 default:
2665 cc = 3;
2666 break;
2669 return cc;
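/* LOAD CONTROL (64-bit): load control registers r1 through r3, wrapping
   modulo 16, from consecutive doublewords at a2. The TLB is flushed because
   control register contents affect address translation. */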
2672 void HELPER(lctlg)(uint32_t r1, uint64_t a2, uint32_t r3)
2674 int i;
2675 uint64_t src = a2;
2677 for (i = r1;; i = (i + 1) % 16) {
2678 env->cregs[i] = ldq(src);
2679 HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
2680 i, src, env->cregs[i]);
2681 src += sizeof(uint64_t);
2683 if (i == r3) {
2684 break;
2688 tlb_flush(env, 1);
2691 void HELPER(lctl)(uint32_t r1, uint64_t a2, uint32_t r3)
2693 int i;
2694 uint64_t src = a2;
2696 for (i = r1;; i = (i + 1) % 16) {
2697 env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | ldl(src);
2698 src += sizeof(uint32_t);
2700 if (i == r3) {
2701 break;
2705 tlb_flush(env, 1);
2708 void HELPER(stctg)(uint32_t r1, uint64_t a2, uint32_t r3)
2710 int i;
2711 uint64_t dest = a2;
2713 for (i = r1;; i = (i + 1) % 16) {
2714 stq(dest, env->cregs[i]);
2715 dest += sizeof(uint64_t);
2717 if (i == r3) {
2718 break;
2723 void HELPER(stctl)(uint32_t r1, uint64_t a2, uint32_t r3)
2725 int i;
2726 uint64_t dest = a2;
2728 for (i = r1;; i = (i + 1) % 16) {
2729 stl(dest, env->cregs[i]);
2730 dest += sizeof(uint32_t);
2732 if (i == r3) {
2733 break;
2738 uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
2740 /* XXX implement */
2742 return 0;
2745 /* insert storage key extended */
2746 uint64_t HELPER(iske)(uint64_t r2)
2748 uint64_t addr = get_address(0, 0, r2);
2750 if (addr >= ram_size) { /* last valid key index is (ram_size / TARGET_PAGE_SIZE) - 1 */
2751 return 0;
2754 /* XXX maybe use qemu's internal keys? */
2755 return env->storage_keys[addr / TARGET_PAGE_SIZE];
2758 /* set storage key extended */
2759 void HELPER(sske)(uint32_t r1, uint64_t r2)
2761 uint64_t addr = get_address(0, 0, r2);
2763 if (addr >= ram_size) {
2764 return;
2767 env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
2770 /* reset reference bit extended */
2771 uint32_t HELPER(rrbe)(uint32_t r1, uint64_t r2)
2773 if (r2 >= ram_size) {
2774 return 0;
2777 /* XXX implement */
2778 #if 0
2779 env->storage_keys[r2 / TARGET_PAGE_SIZE] &= ~SK_REFERENCED;
2780 #endif
2782 /*
2783 * cc
2785 * 0 Reference bit zero; change bit zero
2786 * 1 Reference bit zero; change bit one
2787 * 2 Reference bit one; change bit zero
2788 * 3 Reference bit one; change bit one
2789 */
2790 return 0;
2793 /* compare and swap and purge */
2794 uint32_t HELPER(csp)(uint32_t r1, uint32_t r2)
2796 uint32_t cc;
2797 uint32_t o1 = env->regs[r1];
2798 uint64_t a2 = get_address_31fix(r2) & ~3ULL;
2799 uint32_t o2 = ldl(a2);
2801 if (o1 == o2) {
2802 stl(a2, env->regs[(r1 + 1) & 15]);
2803 if (env->regs[r2] & 0x3) {
2804 /* flush TLB / ALB */
2805 tlb_flush(env, 1);
2807 cc = 0;
2808 } else {
2809 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
2810 cc = 1;
2813 return cc;
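/* Copy at most 256 bytes from address space mode2 (operand a2) to address
   space mode1 (operand a1); the backend for MVCP/MVCS below. Both operands
   are translated and copied byte-wise through physical memory, recursing
   once a page boundary is crossed. */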
2816 static uint32_t mvc_asc(int64_t l, uint64_t a1, uint64_t mode1, uint64_t a2,
2817 uint64_t mode2)
2819 target_ulong src, dest;
2820 int flags, cc = 0, i;
2822 if (!l) {
2823 return 0;
2824 } else if (l > 256) {
2825 /* max 256 */
2826 l = 256;
2827 cc = 3;
2830 if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
2831 cpu_loop_exit(env);
2833 dest |= a1 & ~TARGET_PAGE_MASK;
2835 if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
2836 cpu_loop_exit(env);
2838 src |= a2 & ~TARGET_PAGE_MASK;
2840 /* XXX replace w/ memcpy */
2841 for (i = 0; i < l; i++) {
2842 /* XXX be more clever */
2843 if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
2844 (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
2845 mvc_asc(l - i, a1 + i, mode1, a2 + i, mode2);
2846 break;
2848 stb_phys(dest + i, ldub_phys(src + i));
2851 return cc;
2854 uint32_t HELPER(mvcs)(uint64_t l, uint64_t a1, uint64_t a2)
2856 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2857 __FUNCTION__, l, a1, a2);
2859 return mvc_asc(l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
2862 uint32_t HELPER(mvcp)(uint64_t l, uint64_t a1, uint64_t a2)
2864 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2865 __FUNCTION__, l, a1, a2);
2867 return mvc_asc(l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
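/* SIGNAL PROCESSOR: only SET ARCHITECTURE and SENSE of CPU address 0 are
   minimally handled here; any other order logs a message and sets cc 3. */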
2870 uint32_t HELPER(sigp)(uint64_t order_code, uint32_t r1, uint64_t cpu_addr)
2872 int cc = 0;
2874 HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
2875 __FUNCTION__, order_code, r1, cpu_addr);
2877 /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
2878 as parameter (input). Status (output) is always R1. */
2880 switch (order_code) {
2881 case SIGP_SET_ARCH:
2882 /* switch arch */
2883 break;
2884 case SIGP_SENSE:
2885 /* enumerate CPU status */
2886 if (cpu_addr) {
2887 /* XXX implement when SMP comes */
2888 return 3;
2890 env->regs[r1] &= 0xffffffff00000000ULL;
2891 cc = 1;
2892 break;
2893 default:
2894 /* unknown sigp */
2895 fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
2896 cc = 3;
2899 return cc;
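/* SET ADDRESS SPACE CONTROL FAST: switch the address-space-control bits in
   the PSW to primary, secondary or home space, as selected by bits of the
   second-operand address. */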
2902 void HELPER(sacf)(uint64_t a1)
2904 HELPER_LOG("%s: %16" PRIx64 "\n", __FUNCTION__, a1);
2906 switch (a1 & 0xf00) {
2907 case 0x000:
2908 env->psw.mask &= ~PSW_MASK_ASC;
2909 env->psw.mask |= PSW_ASC_PRIMARY;
2910 break;
2911 case 0x100:
2912 env->psw.mask &= ~PSW_MASK_ASC;
2913 env->psw.mask |= PSW_ASC_SECONDARY;
2914 break;
2915 case 0x300:
2916 env->psw.mask &= ~PSW_MASK_ASC;
2917 env->psw.mask |= PSW_ASC_HOME;
2918 break;
2919 default:
2920 qemu_log("unknown sacf mode: %" PRIx64 "\n", a1);
2921 program_interrupt(env, PGM_SPECIFICATION, 2);
2922 break;
2926 /* invalidate pte */
2927 void HELPER(ipte)(uint64_t pte_addr, uint64_t vaddr)
2929 uint64_t page = vaddr & TARGET_PAGE_MASK;
2930 uint64_t pte = 0;
2932 /* XXX broadcast to other CPUs */
2934 /* XXX Linux is nice enough to give us the exact pte address.
2935 According to spec we'd have to find it out ourselves */
2936 /* XXX Linux is fine with overwriting the pte, the spec requires
2937 us to only set the invalid bit */
2938 stq_phys(pte_addr, pte | _PAGE_INVALID);
2940 /* XXX we exploit the fact that Linux passes the exact virtual
2941 address here - it's not obliged to! */
2942 tlb_flush_page(env, page);
2945 /* flush local tlb */
2946 void HELPER(ptlb)(void)
2948 tlb_flush(env, 1);
2951 /* store using real address */
2952 void HELPER(stura)(uint64_t addr, uint32_t v1)
2954 stl_phys(get_address(0, 0, addr), v1); /* STURA stores a 32-bit word */
2957 /* load real address */
2958 uint32_t HELPER(lra)(uint64_t addr, uint32_t r1)
2960 uint32_t cc = 0;
2961 int old_exc = env->exception_index;
2962 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
2963 uint64_t ret;
2964 int flags;
2966 /* XXX incomplete - has more corner cases */
2967 if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
2968 program_interrupt(env, PGM_SPECIAL_OP, 2);
2971 env->exception_index = old_exc;
2972 if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
2973 cc = 3;
2975 if (env->exception_index == EXCP_PGM) {
2976 ret = env->int_pgm_code | 0x80000000;
2977 } else {
2978 ret |= addr & ~TARGET_PAGE_MASK;
2980 env->exception_index = old_exc;
2982 if (!(env->psw.mask & PSW_MASK_64)) {
2983 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (ret & 0xffffffffULL);
2984 } else {
2985 env->regs[r1] = ret;
2988 return cc;
2991 #endif