[qemu/stefanha.git] / target-s390x / op_helper.c
1 /*
2 * S/390 helper routines
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "exec.h"
22 #include "host-utils.h"
23 #include "helpers.h"
24 #include <string.h>
25 #include "kvm.h"
26 #include <linux/kvm.h>
27 #include "qemu-timer.h"
29 /*****************************************************************************/
30 /* Softmmu support */
31 #if !defined (CONFIG_USER_ONLY)
33 #define MMUSUFFIX _mmu
35 #define SHIFT 0
36 #include "softmmu_template.h"
38 #define SHIFT 1
39 #include "softmmu_template.h"
41 #define SHIFT 2
42 #include "softmmu_template.h"
44 #define SHIFT 3
45 #include "softmmu_template.h"
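/* Each inclusion of softmmu_template.h above is parameterized by SHIFT
 * (the log2 of the access size), so the four passes instantiate the
 * slow-path MMU load/store helpers for 1-, 2-, 4- and 8-byte accesses
 * (roughly __ldb_mmu/__stb_mmu up to __ldq_mmu/__stq_mmu, suffixed with
 * MMUSUFFIX).  These are what the generated code calls on a TLB miss,
 * and they in turn end up in tlb_fill() below. */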
47 /* try to fill the TLB and return an exception if error. If retaddr is
48 NULL, it means that the function was called in C code (i.e. not
49 from generated code or from helper.c) */
50 /* XXX: fix it to restore all registers */
51 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
53 TranslationBlock *tb;
54 CPUState *saved_env;
55 unsigned long pc;
56 int ret;
58 /* XXX: hack to restore env in all cases, even if not called from
59 generated code */
60 saved_env = env;
61 env = cpu_single_env;
62 ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
63 if (unlikely(ret != 0)) {
64 if (likely(retaddr)) {
65 /* now we have a real cpu fault */
66 pc = (unsigned long)retaddr;
67 tb = tb_find_pc(pc);
68 if (likely(tb)) {
69 /* the PC is inside the translated code. It means that we have
70 a virtual CPU fault */
71 cpu_restore_state(tb, env, pc);
74 cpu_loop_exit();
76 env = saved_env;
79 #endif
81 /* #define DEBUG_HELPER */
82 #ifdef DEBUG_HELPER
83 #define HELPER_LOG(x...) qemu_log(x)
84 #else
85 #define HELPER_LOG(x...)
86 #endif
88 /* raise an exception */
89 void HELPER(exception)(uint32_t excp)
91 HELPER_LOG("%s: exception %d\n", __FUNCTION__, excp);
92 env->exception_index = excp;
93 cpu_loop_exit();
96 #ifndef CONFIG_USER_ONLY
97 static void mvc_fast_memset(CPUState *env, uint32_t l, uint64_t dest,
98 uint8_t byte)
100 target_phys_addr_t dest_phys;
101 target_phys_addr_t len = l;
102 void *dest_p;
103 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
104 int flags;
106 if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
107 stb(dest, byte);
108 cpu_abort(env, "should never reach here");
110 dest_phys |= dest & ~TARGET_PAGE_MASK;
112 dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
114 memset(dest_p, byte, len);
116 cpu_physical_memory_unmap(dest_p, 1, len, len);
119 static void mvc_fast_memmove(CPUState *env, uint32_t l, uint64_t dest,
120 uint64_t src)
122 target_phys_addr_t dest_phys;
123 target_phys_addr_t src_phys;
124 target_phys_addr_t len = l;
125 void *dest_p;
126 void *src_p;
127 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
128 int flags;
130 if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
131 stb(dest, 0);
132 cpu_abort(env, "should never reach here");
134 dest_phys |= dest & ~TARGET_PAGE_MASK;
136 if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
137 ldub(src);
138 cpu_abort(env, "should never reach here");
140 src_phys |= src & ~TARGET_PAGE_MASK;
142 dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
143 src_p = cpu_physical_memory_map(src_phys, &len, 0);
145 memmove(dest_p, src_p, len);
147 cpu_physical_memory_unmap(dest_p, 1, len, len);
148 cpu_physical_memory_unmap(src_p, 0, len, len);
150 #endif
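/* The two fast paths above translate the start address once with
 * mmu_translate() and then let the host's memset()/memmove() operate
 * on the mapped guest memory, so they assume the range does not cross
 * a page boundary (the MVC/XC callers check this explicitly).  The
 * stb()/ldub() calls in front of cpu_abort() are only there to force
 * the regular fault path if the translation fails. */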
152 /* and on array */
153 uint32_t HELPER(nc)(uint32_t l, uint64_t dest, uint64_t src)
155 int i;
156 unsigned char x;
157 uint32_t cc = 0;
159 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
160 __FUNCTION__, l, dest, src);
161 for (i = 0; i <= l; i++) {
162 x = ldub(dest + i) & ldub(src + i);
163 if (x) {
164 cc = 1;
166 stb(dest + i, x);
168 return cc;
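/* Condition code convention for NC above and XC/OC below: the L field
 * of the SS instruction holds the length minus one, hence the inclusive
 * "i <= l" loops; cc is 0 when every result byte is zero and 1 as soon
 * as any result byte is non-zero.  For example, a one-byte NC of 0x0f
 * with 0xf0 stores 0x00 and yields cc 0. */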
171 /* xor on array */
172 uint32_t HELPER(xc)(uint32_t l, uint64_t dest, uint64_t src)
174 int i;
175 unsigned char x;
176 uint32_t cc = 0;
178 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
179 __FUNCTION__, l, dest, src);
181 #ifndef CONFIG_USER_ONLY
182 /* xor with itself is the same as memset(0) */
183 if ((l > 32) && (src == dest) &&
184 (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
185 mvc_fast_memset(env, l + 1, dest, 0);
186 return 0;
188 #else
189 if (src == dest) {
190 memset(g2h(dest), 0, l + 1);
191 return 0;
193 #endif
195 for (i = 0; i <= l; i++) {
196 x = ldub(dest + i) ^ ldub(src + i);
197 if (x) {
198 cc = 1;
200 stb(dest + i, x);
202 return cc;
205 /* or on array */
206 uint32_t HELPER(oc)(uint32_t l, uint64_t dest, uint64_t src)
208 int i;
209 unsigned char x;
210 uint32_t cc = 0;
212 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
213 __FUNCTION__, l, dest, src);
214 for (i = 0; i <= l; i++) {
215 x = ldub(dest + i) | ldub(src + i);
216 if (x) {
217 cc = 1;
219 stb(dest + i, x);
221 return cc;
224 /* memmove */
225 void HELPER(mvc)(uint32_t l, uint64_t dest, uint64_t src)
227 int i = 0;
228 int x = 0;
229 uint32_t l_64 = (l + 1) / 8;
231 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
232 __FUNCTION__, l, dest, src);
234 #ifndef CONFIG_USER_ONLY
235 if ((l > 32) &&
236 (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
237 (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
238 if (dest == (src + 1)) {
239 mvc_fast_memset(env, l + 1, dest, ldub(src));
240 return;
241 } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
242 mvc_fast_memmove(env, l + 1, dest, src);
243 return;
246 #else
247 if (dest == (src + 1)) {
248 memset(g2h(dest), ldub(src), l + 1);
249 return;
250 } else {
251 memmove(g2h(dest), g2h(src), l + 1);
252 return;
254 #endif
256 /* handle the parts that fit into 8-byte loads/stores */
257 if (dest != (src + 1)) {
258 for (i = 0; i < l_64; i++) {
259 stq(dest + x, ldq(src + x));
260 x += 8;
264 /* slow version crossing pages with byte accesses */
265 for (i = x; i <= l; i++) {
266 stb(dest + i, ldub(src + i));
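/* The dest == src + 1 special case above is the classic mainframe
 * memset idiom: MVC copies byte by byte from left to right, so copying
 * from an address one below the destination propagates the byte at the
 * source across the whole field, which is why it can be implemented as
 * a memset of ldub(src). */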
270 /* compare unsigned byte arrays */
271 uint32_t HELPER(clc)(uint32_t l, uint64_t s1, uint64_t s2)
273 int i;
274 unsigned char x,y;
275 uint32_t cc;
276 HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
277 __FUNCTION__, l, s1, s2);
278 for (i = 0; i <= l; i++) {
279 x = ldub(s1 + i);
280 y = ldub(s2 + i);
281 HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
282 if (x < y) {
283 cc = 1;
284 goto done;
285 } else if (x > y) {
286 cc = 2;
287 goto done;
290 cc = 0;
291 done:
292 HELPER_LOG("\n");
293 return cc;
296 /* compare logical under mask */
297 uint32_t HELPER(clm)(uint32_t r1, uint32_t mask, uint64_t addr)
299 uint8_t r,d;
300 uint32_t cc;
301 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __FUNCTION__, r1,
302 mask, addr);
303 cc = 0;
304 while (mask) {
305 if (mask & 8) {
306 d = ldub(addr);
307 r = (r1 & 0xff000000UL) >> 24;
308 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
309 addr);
310 if (r < d) {
311 cc = 1;
312 break;
313 } else if (r > d) {
314 cc = 2;
315 break;
317 addr++;
319 mask = (mask << 1) & 0xf;
320 r1 <<= 8;
322 HELPER_LOG("\n");
323 return cc;
326 /* store character under mask */
327 void HELPER(stcm)(uint32_t r1, uint32_t mask, uint64_t addr)
329 uint8_t r;
330 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__, r1, mask,
331 addr);
332 while (mask) {
333 if (mask & 8) {
334 r = (r1 & 0xff000000UL) >> 24;
335 stb(addr, r);
336 HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask, r, addr);
337 addr++;
339 mask = (mask << 1) & 0xf;
340 r1 <<= 8;
342 HELPER_LOG("\n");
345 /* 64/64 -> 128 unsigned multiplication */
346 void HELPER(mlg)(uint32_t r1, uint64_t v2)
348 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
349 /* assuming 64-bit hosts have __uint128_t */
350 __uint128_t res = (__uint128_t)env->regs[r1 + 1];
351 res *= (__uint128_t)v2;
352 env->regs[r1] = (uint64_t)(res >> 64);
353 env->regs[r1 + 1] = (uint64_t)res;
354 #else
355 mulu64(&env->regs[r1 + 1], &env->regs[r1], env->regs[r1 + 1], v2);
356 #endif
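/* Worked example for the 64x64 -> 128 bit multiply above, assuming
 * r1 = 2: regs[3] = 0xffffffffffffffffULL times v2 = 2 is the 128-bit
 * value 0x1_fffffffffffffffe, so regs[2] (the high half) becomes 1 and
 * regs[3] becomes 0xfffffffffffffffe.  The mulu64() fallback computes
 * exactly the same split. */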
359 /* 128 -> 64/64 unsigned division */
360 void HELPER(dlg)(uint32_t r1, uint64_t v2)
362 uint64_t divisor = v2;
364 if (!env->regs[r1]) {
365 /* 64 -> 64/64 case */
366 env->regs[r1] = env->regs[r1+1] % divisor;
367 env->regs[r1+1] = env->regs[r1+1] / divisor;
368 return;
369 } else {
371 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
372 /* assuming 64-bit hosts have __uint128_t */
373 __uint128_t dividend = (((__uint128_t)env->regs[r1]) << 64) |
374 (env->regs[r1+1]);
375 __uint128_t quotient = dividend / divisor;
376 env->regs[r1+1] = quotient;
377 __uint128_t remainder = dividend % divisor;
378 env->regs[r1] = remainder;
379 #else
 380         /* 32-bit hosts would need special wrapper functionality - just abort if
 381            we encounter such a case; it's very unlikely anyway. */
382 cpu_abort(env, "128 -> 64/64 division not implemented\n");
383 #endif
387 static inline uint64_t get_address(int x2, int b2, int d2)
389 uint64_t r = d2;
391 if (x2) {
392 r += env->regs[x2];
395 if (b2) {
396 r += env->regs[b2];
399 /* 31-Bit mode */
400 if (!(env->psw.mask & PSW_MASK_64)) {
401 r &= 0x7fffffff;
404 return r;
407 static inline uint64_t get_address_31fix(int reg)
409 uint64_t r = env->regs[reg];
411 /* 31-Bit mode */
412 if (!(env->psw.mask & PSW_MASK_64)) {
413 r &= 0x7fffffff;
416 return r;
419 /* search string (c is byte to search, r2 is string, r1 end of string) */
420 uint32_t HELPER(srst)(uint32_t c, uint32_t r1, uint32_t r2)
422 uint64_t i;
423 uint32_t cc = 2;
424 uint64_t str = get_address_31fix(r2);
425 uint64_t end = get_address_31fix(r1);
427 HELPER_LOG("%s: c %d *r1 0x%" PRIx64 " *r2 0x%" PRIx64 "\n", __FUNCTION__,
428 c, env->regs[r1], env->regs[r2]);
430 for (i = str; i != end; i++) {
431 if (ldub(i) == c) {
432 env->regs[r1] = i;
433 cc = 1;
434 break;
438 return cc;
441 /* unsigned string compare (c is string terminator) */
442 uint32_t HELPER(clst)(uint32_t c, uint32_t r1, uint32_t r2)
444 uint64_t s1 = get_address_31fix(r1);
445 uint64_t s2 = get_address_31fix(r2);
446 uint8_t v1, v2;
447 uint32_t cc;
448 c = c & 0xff;
449 #ifdef CONFIG_USER_ONLY
450 if (!c) {
451 HELPER_LOG("%s: comparing '%s' and '%s'\n",
452 __FUNCTION__, (char*)g2h(s1), (char*)g2h(s2));
454 #endif
455 for (;;) {
456 v1 = ldub(s1);
457 v2 = ldub(s2);
458 if ((v1 == c || v2 == c) || (v1 != v2)) {
459 break;
461 s1++;
462 s2++;
465 if (v1 == v2) {
466 cc = 0;
467 } else {
468 cc = (v1 < v2) ? 1 : 2;
469 /* FIXME: 31-bit mode! */
470 env->regs[r1] = s1;
471 env->regs[r2] = s2;
473 return cc;
476 /* move page */
477 void HELPER(mvpg)(uint64_t r0, uint64_t r1, uint64_t r2)
479 /* XXX missing r0 handling */
480 #ifdef CONFIG_USER_ONLY
481 int i;
483 for (i = 0; i < TARGET_PAGE_SIZE; i++) {
484 stb(r1 + i, ldub(r2 + i));
486 #else
487 mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
488 #endif
491 /* string copy (c is string terminator) */
492 void HELPER(mvst)(uint32_t c, uint32_t r1, uint32_t r2)
494 uint64_t dest = get_address_31fix(r1);
495 uint64_t src = get_address_31fix(r2);
496 uint8_t v;
497 c = c & 0xff;
498 #ifdef CONFIG_USER_ONLY
499 if (!c) {
500 HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__, (char*)g2h(src),
501 dest);
503 #endif
504 for (;;) {
505 v = ldub(src);
506 stb(dest, v);
507 if (v == c) {
508 break;
510 src++;
511 dest++;
513 env->regs[r1] = dest; /* FIXME: 31-bit mode! */
516 /* compare and swap 64-bit */
517 uint32_t HELPER(csg)(uint32_t r1, uint64_t a2, uint32_t r3)
519 /* FIXME: locking? */
520 uint32_t cc;
521 uint64_t v2 = ldq(a2);
522 if (env->regs[r1] == v2) {
523 cc = 0;
524 stq(a2, env->regs[r3]);
525 } else {
526 cc = 1;
527 env->regs[r1] = v2;
529 return cc;
532 /* compare double and swap 64-bit */
533 uint32_t HELPER(cdsg)(uint32_t r1, uint64_t a2, uint32_t r3)
535 /* FIXME: locking? */
536 uint32_t cc;
537 uint64_t v2_hi = ldq(a2);
538 uint64_t v2_lo = ldq(a2 + 8);
539 uint64_t v1_hi = env->regs[r1];
540 uint64_t v1_lo = env->regs[r1 + 1];
542 if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) {
543 cc = 0;
544 stq(a2, env->regs[r3]);
545 stq(a2 + 8, env->regs[r3 + 1]);
546 } else {
547 cc = 1;
548 env->regs[r1] = v2_hi;
549 env->regs[r1 + 1] = v2_lo;
552 return cc;
555 /* compare and swap 32-bit */
556 uint32_t HELPER(cs)(uint32_t r1, uint64_t a2, uint32_t r3)
558 /* FIXME: locking? */
559 uint32_t cc;
560 HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__, r1, a2, r3);
561 uint32_t v2 = ldl(a2);
562 if (((uint32_t)env->regs[r1]) == v2) {
563 cc = 0;
564 stl(a2, (uint32_t)env->regs[r3]);
565 } else {
566 cc = 1;
567 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | v2;
569 return cc;
572 static uint32_t helper_icm(uint32_t r1, uint64_t address, uint32_t mask)
574 int pos = 24; /* top of the lower half of r1 */
575 uint64_t rmask = 0xff000000ULL;
576 uint8_t val = 0;
577 int ccd = 0;
578 uint32_t cc = 0;
580 while (mask) {
581 if (mask & 8) {
582 env->regs[r1] &= ~rmask;
583 val = ldub(address);
584 if ((val & 0x80) && !ccd) {
585 cc = 1;
587 ccd = 1;
588 if (val && cc == 0) {
589 cc = 2;
591 env->regs[r1] |= (uint64_t)val << pos;
592 address++;
594 mask = (mask << 1) & 0xf;
595 pos -= 8;
596 rmask >>= 8;
599 return cc;
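/* Example for the ICM condition code above: with mask 0x8 a single
 * byte is inserted into the top byte of the low 32-bit half of r1.
 * If that byte is 0x80 its leftmost bit is set and cc becomes 1, if it
 * is 0x01 cc becomes 2, and if it is 0x00 cc stays 0. */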
602 /* execute instruction
603 this instruction executes an insn modified with the contents of r1
604 it does not change the executed instruction in memory
605 it does not change the program counter
606 in other words: tricky...
607 currently implemented by interpreting the cases it is most commonly used in
609 uint32_t HELPER(ex)(uint32_t cc, uint64_t v1, uint64_t addr, uint64_t ret)
611 uint16_t insn = lduw_code(addr);
612 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__, v1, addr,
613 insn);
614 if ((insn & 0xf0ff) == 0xd000) {
615 uint32_t l, insn2, b1, b2, d1, d2;
616 l = v1 & 0xff;
617 insn2 = ldl_code(addr + 2);
618 b1 = (insn2 >> 28) & 0xf;
619 b2 = (insn2 >> 12) & 0xf;
620 d1 = (insn2 >> 16) & 0xfff;
621 d2 = insn2 & 0xfff;
622 switch (insn & 0xf00) {
623 case 0x200:
624 helper_mvc(l, get_address(0, b1, d1), get_address(0, b2, d2));
625 break;
626 case 0x500:
627 cc = helper_clc(l, get_address(0, b1, d1), get_address(0, b2, d2));
628 break;
629 case 0x700:
630 cc = helper_xc(l, get_address(0, b1, d1), get_address(0, b2, d2));
631 break;
632 default:
633 goto abort;
634 break;
636 } else if ((insn & 0xff00) == 0x0a00) {
637 /* supervisor call */
638 HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__, (insn|v1) & 0xff);
639 env->psw.addr = ret - 4;
640 env->int_svc_code = (insn|v1) & 0xff;
641 env->int_svc_ilc = 4;
642 helper_exception(EXCP_SVC);
643 } else if ((insn & 0xff00) == 0xbf00) {
644 uint32_t insn2, r1, r3, b2, d2;
645 insn2 = ldl_code(addr + 2);
646 r1 = (insn2 >> 20) & 0xf;
647 r3 = (insn2 >> 16) & 0xf;
648 b2 = (insn2 >> 12) & 0xf;
649 d2 = insn2 & 0xfff;
650 cc = helper_icm(r1, get_address(0, b2, d2), r3);
651 } else {
652 abort:
653 cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
654 insn);
656 return cc;
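/* Quick reference for the EXECUTE emulation above: per the architecture
 * the low byte of r1 (v1) is OR-ed into the second byte of the target
 * instruction, which for the SS-format targets supplies the length
 * field.  The handled prefixes are 0xd2 (MVC), 0xd5 (CLC) and 0xd7 (XC)
 * via the 0xd000 check, plus 0x0a (SVC) and 0xbf (ICM); anything else
 * aborts. */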
659 /* absolute value 32-bit */
660 uint32_t HELPER(abs_i32)(int32_t val)
662 if (val < 0) {
663 return -val;
664 } else {
665 return val;
669 /* negative absolute value 32-bit */
670 int32_t HELPER(nabs_i32)(int32_t val)
672 if (val < 0) {
673 return val;
674 } else {
675 return -val;
679 /* absolute value 64-bit */
680 uint64_t HELPER(abs_i64)(int64_t val)
682 HELPER_LOG("%s: val 0x%" PRIx64 "\n", __FUNCTION__, val);
684 if (val < 0) {
685 return -val;
686 } else {
687 return val;
691 /* negative absolute value 64-bit */
692 int64_t HELPER(nabs_i64)(int64_t val)
694 if (val < 0) {
695 return val;
696 } else {
697 return -val;
701 /* add with carry 32-bit unsigned */
702 uint32_t HELPER(addc_u32)(uint32_t cc, uint32_t v1, uint32_t v2)
704 uint32_t res;
706 res = v1 + v2;
707 if (cc & 2) {
708 res++;
711 return res;
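/* The "cc & 2" test above follows the ADDU condition-code encoding
 * produced by cc_calc_addu_32/64 further down: cc 2 and 3 mean a carry
 * came out of the previous addition, cc 0 and 1 mean it did not, so
 * bit 1 of the cc is exactly the incoming carry. */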
 714 /* store character under mask high; operates on the upper half of r1 */
715 void HELPER(stcmh)(uint32_t r1, uint64_t address, uint32_t mask)
717 int pos = 56; /* top of the upper half of r1 */
719 while (mask) {
720 if (mask & 8) {
721 stb(address, (env->regs[r1] >> pos) & 0xff);
722 address++;
724 mask = (mask << 1) & 0xf;
725 pos -= 8;
729 /* insert character under mask high; same as icm, but operates on the
730 upper half of r1 */
731 uint32_t HELPER(icmh)(uint32_t r1, uint64_t address, uint32_t mask)
733 int pos = 56; /* top of the upper half of r1 */
734 uint64_t rmask = 0xff00000000000000ULL;
735 uint8_t val = 0;
736 int ccd = 0;
737 uint32_t cc = 0;
739 while (mask) {
740 if (mask & 8) {
741 env->regs[r1] &= ~rmask;
742 val = ldub(address);
743 if ((val & 0x80) && !ccd) {
744 cc = 1;
746 ccd = 1;
747 if (val && cc == 0) {
748 cc = 2;
750 env->regs[r1] |= (uint64_t)val << pos;
751 address++;
753 mask = (mask << 1) & 0xf;
754 pos -= 8;
755 rmask >>= 8;
758 return cc;
761 /* insert psw mask and condition code into r1 */
762 void HELPER(ipm)(uint32_t cc, uint32_t r1)
764 uint64_t r = env->regs[r1];
766 r &= 0xffffffff00ffffffULL;
767 r |= (cc << 28) | ( (env->psw.mask >> 40) & 0xf );
768 env->regs[r1] = r;
769 HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__,
770 cc, env->psw.mask, r);
773 /* load access registers r1 to r3 from memory at a2 */
774 void HELPER(lam)(uint32_t r1, uint64_t a2, uint32_t r3)
776 int i;
778 for (i = r1;; i = (i + 1) % 16) {
779 env->aregs[i] = ldl(a2);
780 a2 += 4;
782 if (i == r3) {
783 break;
788 /* store access registers r1 to r3 in memory at a2 */
789 void HELPER(stam)(uint32_t r1, uint64_t a2, uint32_t r3)
791 int i;
793 for (i = r1;; i = (i + 1) % 16) {
794 stl(a2, env->aregs[i]);
795 a2 += 4;
797 if (i == r3) {
798 break;
803 /* move long */
804 uint32_t HELPER(mvcl)(uint32_t r1, uint32_t r2)
806 uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
807 uint64_t dest = get_address_31fix(r1);
808 uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
809 uint64_t src = get_address_31fix(r2);
810 uint8_t pad = src >> 24;
811 uint8_t v;
812 uint32_t cc;
814 if (destlen == srclen) {
815 cc = 0;
816 } else if (destlen < srclen) {
817 cc = 1;
818 } else {
819 cc = 2;
822 if (srclen > destlen) {
823 srclen = destlen;
826 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
827 v = ldub(src);
828 stb(dest, v);
831 for (; destlen; dest++, destlen--) {
832 stb(dest, pad);
835 env->regs[r1 + 1] = destlen;
836 /* can't use srclen here, we trunc'ed it */
837 env->regs[r2 + 1] -= src - env->regs[r2];
838 env->regs[r1] = dest;
839 env->regs[r2] = src;
841 return cc;
 844 /* move long extended; another memcopy insn with more bells and whistles */
845 uint32_t HELPER(mvcle)(uint32_t r1, uint64_t a2, uint32_t r3)
847 uint64_t destlen = env->regs[r1 + 1];
848 uint64_t dest = env->regs[r1];
849 uint64_t srclen = env->regs[r3 + 1];
850 uint64_t src = env->regs[r3];
851 uint8_t pad = a2 & 0xff;
852 uint8_t v;
853 uint32_t cc;
855 if (!(env->psw.mask & PSW_MASK_64)) {
856 destlen = (uint32_t)destlen;
857 srclen = (uint32_t)srclen;
858 dest &= 0x7fffffff;
859 src &= 0x7fffffff;
862 if (destlen == srclen) {
863 cc = 0;
864 } else if (destlen < srclen) {
865 cc = 1;
866 } else {
867 cc = 2;
870 if (srclen > destlen) {
871 srclen = destlen;
874 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
875 v = ldub(src);
876 stb(dest, v);
879 for (; destlen; dest++, destlen--) {
880 stb(dest, pad);
883 env->regs[r1 + 1] = destlen;
884 /* can't use srclen here, we trunc'ed it */
885 /* FIXME: 31-bit mode! */
886 env->regs[r3 + 1] -= src - env->regs[r3];
887 env->regs[r1] = dest;
888 env->regs[r3] = src;
890 return cc;
 893 /* compare logical long extended; a memcompare insn with padding */
894 uint32_t HELPER(clcle)(uint32_t r1, uint64_t a2, uint32_t r3)
896 uint64_t destlen = env->regs[r1 + 1];
897 uint64_t dest = get_address_31fix(r1);
898 uint64_t srclen = env->regs[r3 + 1];
899 uint64_t src = get_address_31fix(r3);
900 uint8_t pad = a2 & 0xff;
901 uint8_t v1 = 0,v2 = 0;
902 uint32_t cc = 0;
904 if (!(destlen || srclen)) {
905 return cc;
908 if (srclen > destlen) {
909 srclen = destlen;
912 for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
913 v1 = srclen ? ldub(src) : pad;
914 v2 = destlen ? ldub(dest) : pad;
915 if (v1 != v2) {
916 cc = (v1 < v2) ? 1 : 2;
917 break;
921 env->regs[r1 + 1] = destlen;
922 /* can't use srclen here, we trunc'ed it */
923 env->regs[r3 + 1] -= src - env->regs[r3];
924 env->regs[r1] = dest;
925 env->regs[r3] = src;
927 return cc;
930 /* subtract unsigned v2 from v1 with borrow */
931 uint32_t HELPER(slb)(uint32_t cc, uint32_t r1, uint32_t v2)
933 uint32_t v1 = env->regs[r1];
934 uint32_t res = v1 + (~v2) + (cc >> 1);
936 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | res;
937 if (cc & 2) {
938 /* borrow */
939 return v1 ? 1 : 0;
940 } else {
941 return v1 ? 3 : 2;
945 /* subtract unsigned v2 from v1 with borrow */
946 uint32_t HELPER(slbg)(uint32_t cc, uint32_t r1, uint64_t v1, uint64_t v2)
948 uint64_t res = v1 + (~v2) + (cc >> 1);
950 env->regs[r1] = res;
951 if (cc & 2) {
952 /* borrow */
953 return v1 ? 1 : 0;
954 } else {
955 return v1 ? 3 : 2;
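/* Borrow handling above in two's complement: v1 - v2 is computed as
 * v1 + ~v2 + carry_in, where carry_in = cc >> 1 is 1 for cc 2/3 (no
 * pending borrow) and 0 for cc 0/1 (pending borrow).  For instance,
 * with cc = 2, v1 = 5 and v2 = 3 the result is 5 + ~3 + 1 = 2. */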
959 static inline int float_comp_to_cc(int float_compare)
961 switch (float_compare) {
962 case float_relation_equal:
963 return 0;
964 case float_relation_less:
965 return 1;
966 case float_relation_greater:
967 return 2;
968 case float_relation_unordered:
969 return 3;
970 default:
971 cpu_abort(env, "unknown return value for float compare\n");
975 /* condition codes for binary FP ops */
976 static uint32_t set_cc_f32(float32 v1, float32 v2)
978 return float_comp_to_cc(float32_compare_quiet(v1, v2, &env->fpu_status));
981 static uint32_t set_cc_f64(float64 v1, float64 v2)
983 return float_comp_to_cc(float64_compare_quiet(v1, v2, &env->fpu_status));
986 /* condition codes for unary FP ops */
987 static uint32_t set_cc_nz_f32(float32 v)
989 if (float32_is_any_nan(v)) {
990 return 3;
991 } else if (float32_is_zero(v)) {
992 return 0;
993 } else if (float32_is_neg(v)) {
994 return 1;
995 } else {
996 return 2;
1000 static uint32_t set_cc_nz_f64(float64 v)
1002 if (float64_is_any_nan(v)) {
1003 return 3;
1004 } else if (float64_is_zero(v)) {
1005 return 0;
1006 } else if (float64_is_neg(v)) {
1007 return 1;
1008 } else {
1009 return 2;
1013 static uint32_t set_cc_nz_f128(float128 v)
1015 if (float128_is_any_nan(v)) {
1016 return 3;
1017 } else if (float128_is_zero(v)) {
1018 return 0;
1019 } else if (float128_is_neg(v)) {
1020 return 1;
1021 } else {
1022 return 2;
1026 /* convert 32-bit int to 64-bit float */
1027 void HELPER(cdfbr)(uint32_t f1, int32_t v2)
1029 HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__, v2, f1);
1030 env->fregs[f1].d = int32_to_float64(v2, &env->fpu_status);
1033 /* convert 32-bit int to 128-bit float */
1034 void HELPER(cxfbr)(uint32_t f1, int32_t v2)
1036 CPU_QuadU v1;
1037 v1.q = int32_to_float128(v2, &env->fpu_status);
1038 env->fregs[f1].ll = v1.ll.upper;
1039 env->fregs[f1 + 2].ll = v1.ll.lower;
1042 /* convert 64-bit int to 32-bit float */
1043 void HELPER(cegbr)(uint32_t f1, int64_t v2)
1045 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
1046 env->fregs[f1].l.upper = int64_to_float32(v2, &env->fpu_status);
1049 /* convert 64-bit int to 64-bit float */
1050 void HELPER(cdgbr)(uint32_t f1, int64_t v2)
1052 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
1053 env->fregs[f1].d = int64_to_float64(v2, &env->fpu_status);
1056 /* convert 64-bit int to 128-bit float */
1057 void HELPER(cxgbr)(uint32_t f1, int64_t v2)
1059 CPU_QuadU x1;
1060 x1.q = int64_to_float128(v2, &env->fpu_status);
1061 HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__, v2,
1062 x1.ll.upper, x1.ll.lower);
1063 env->fregs[f1].ll = x1.ll.upper;
1064 env->fregs[f1 + 2].ll = x1.ll.lower;
1067 /* convert 32-bit int to 32-bit float */
1068 void HELPER(cefbr)(uint32_t f1, int32_t v2)
1070 env->fregs[f1].l.upper = int32_to_float32(v2, &env->fpu_status);
1071 HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__, v2,
1072 env->fregs[f1].l.upper, f1);
1075 /* 32-bit FP addition RR */
1076 uint32_t HELPER(aebr)(uint32_t f1, uint32_t f2)
1078 env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
1079 env->fregs[f2].l.upper,
1080 &env->fpu_status);
1081 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
1082 env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
1084 return set_cc_nz_f32(env->fregs[f1].l.upper);
1087 /* 64-bit FP addition RR */
1088 uint32_t HELPER(adbr)(uint32_t f1, uint32_t f2)
1090 env->fregs[f1].d = float64_add(env->fregs[f1].d, env->fregs[f2].d,
1091 &env->fpu_status);
1092 HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__,
1093 env->fregs[f2].d, env->fregs[f1].d, f1);
1095 return set_cc_nz_f64(env->fregs[f1].d);
1098 /* 32-bit FP subtraction RR */
1099 uint32_t HELPER(sebr)(uint32_t f1, uint32_t f2)
1101 env->fregs[f1].l.upper = float32_sub(env->fregs[f1].l.upper,
1102 env->fregs[f2].l.upper,
1103 &env->fpu_status);
1104 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
1105 env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
1107 return set_cc_nz_f32(env->fregs[f1].l.upper);
1110 /* 64-bit FP subtraction RR */
1111 uint32_t HELPER(sdbr)(uint32_t f1, uint32_t f2)
1113 env->fregs[f1].d = float64_sub(env->fregs[f1].d, env->fregs[f2].d,
1114 &env->fpu_status);
1115 HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
1116 __FUNCTION__, env->fregs[f2].d, env->fregs[f1].d, f1);
1118 return set_cc_nz_f64(env->fregs[f1].d);
1121 /* 32-bit FP division RR */
1122 void HELPER(debr)(uint32_t f1, uint32_t f2)
1124 env->fregs[f1].l.upper = float32_div(env->fregs[f1].l.upper,
1125 env->fregs[f2].l.upper,
1126 &env->fpu_status);
1129 /* 128-bit FP division RR */
1130 void HELPER(dxbr)(uint32_t f1, uint32_t f2)
1132 CPU_QuadU v1;
1133 v1.ll.upper = env->fregs[f1].ll;
1134 v1.ll.lower = env->fregs[f1 + 2].ll;
1135 CPU_QuadU v2;
1136 v2.ll.upper = env->fregs[f2].ll;
1137 v2.ll.lower = env->fregs[f2 + 2].ll;
1138 CPU_QuadU res;
1139 res.q = float128_div(v1.q, v2.q, &env->fpu_status);
1140 env->fregs[f1].ll = res.ll.upper;
1141 env->fregs[f1 + 2].ll = res.ll.lower;
1144 /* 64-bit FP multiplication RR */
1145 void HELPER(mdbr)(uint32_t f1, uint32_t f2)
1147 env->fregs[f1].d = float64_mul(env->fregs[f1].d, env->fregs[f2].d,
1148 &env->fpu_status);
1151 /* 128-bit FP multiplication RR */
1152 void HELPER(mxbr)(uint32_t f1, uint32_t f2)
1154 CPU_QuadU v1;
1155 v1.ll.upper = env->fregs[f1].ll;
1156 v1.ll.lower = env->fregs[f1 + 2].ll;
1157 CPU_QuadU v2;
1158 v2.ll.upper = env->fregs[f2].ll;
1159 v2.ll.lower = env->fregs[f2 + 2].ll;
1160 CPU_QuadU res;
1161 res.q = float128_mul(v1.q, v2.q, &env->fpu_status);
1162 env->fregs[f1].ll = res.ll.upper;
1163 env->fregs[f1 + 2].ll = res.ll.lower;
1166 /* convert 32-bit float to 64-bit float */
1167 void HELPER(ldebr)(uint32_t r1, uint32_t r2)
1169 env->fregs[r1].d = float32_to_float64(env->fregs[r2].l.upper,
1170 &env->fpu_status);
1173 /* convert 128-bit float to 64-bit float */
1174 void HELPER(ldxbr)(uint32_t f1, uint32_t f2)
1176 CPU_QuadU x2;
1177 x2.ll.upper = env->fregs[f2].ll;
1178 x2.ll.lower = env->fregs[f2 + 2].ll;
1179 env->fregs[f1].d = float128_to_float64(x2.q, &env->fpu_status);
1180 HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__, env->fregs[f1].d);
1183 /* convert 64-bit float to 128-bit float */
1184 void HELPER(lxdbr)(uint32_t f1, uint32_t f2)
1186 CPU_QuadU res;
1187 res.q = float64_to_float128(env->fregs[f2].d, &env->fpu_status);
1188 env->fregs[f1].ll = res.ll.upper;
1189 env->fregs[f1 + 2].ll = res.ll.lower;
1192 /* convert 64-bit float to 32-bit float */
1193 void HELPER(ledbr)(uint32_t f1, uint32_t f2)
1195 float64 d2 = env->fregs[f2].d;
1196 env->fregs[f1].l.upper = float64_to_float32(d2, &env->fpu_status);
1199 /* convert 128-bit float to 32-bit float */
1200 void HELPER(lexbr)(uint32_t f1, uint32_t f2)
1202 CPU_QuadU x2;
1203 x2.ll.upper = env->fregs[f2].ll;
1204 x2.ll.lower = env->fregs[f2 + 2].ll;
1205 env->fregs[f1].l.upper = float128_to_float32(x2.q, &env->fpu_status);
1206 HELPER_LOG("%s: to 0x%d\n", __FUNCTION__, env->fregs[f1].l.upper);
1209 /* absolute value of 32-bit float */
1210 uint32_t HELPER(lpebr)(uint32_t f1, uint32_t f2)
1212     float32 v1;
1213     float32 v2 = env->fregs[f2].l.upper; /* 32-bit value lives in l.upper, as in ltebr/lcebr */
1214     v1 = float32_abs(v2);
1215     env->fregs[f1].l.upper = v1;
1216 return set_cc_nz_f32(v1);
1219 /* absolute value of 64-bit float */
1220 uint32_t HELPER(lpdbr)(uint32_t f1, uint32_t f2)
1222 float64 v1;
1223 float64 v2 = env->fregs[f2].d;
1224 v1 = float64_abs(v2);
1225 env->fregs[f1].d = v1;
1226 return set_cc_nz_f64(v1);
1229 /* absolute value of 128-bit float */
1230 uint32_t HELPER(lpxbr)(uint32_t f1, uint32_t f2)
1232 CPU_QuadU v1;
1233 CPU_QuadU v2;
1234 v2.ll.upper = env->fregs[f2].ll;
1235 v2.ll.lower = env->fregs[f2 + 2].ll;
1236 v1.q = float128_abs(v2.q);
1237 env->fregs[f1].ll = v1.ll.upper;
1238 env->fregs[f1 + 2].ll = v1.ll.lower;
1239 return set_cc_nz_f128(v1.q);
1242 /* load and test 64-bit float */
1243 uint32_t HELPER(ltdbr)(uint32_t f1, uint32_t f2)
1245 env->fregs[f1].d = env->fregs[f2].d;
1246 return set_cc_nz_f64(env->fregs[f1].d);
1249 /* load and test 32-bit float */
1250 uint32_t HELPER(ltebr)(uint32_t f1, uint32_t f2)
1252 env->fregs[f1].l.upper = env->fregs[f2].l.upper;
1253 return set_cc_nz_f32(env->fregs[f1].l.upper);
1256 /* load and test 128-bit float */
1257 uint32_t HELPER(ltxbr)(uint32_t f1, uint32_t f2)
1259 CPU_QuadU x;
1260 x.ll.upper = env->fregs[f2].ll;
1261 x.ll.lower = env->fregs[f2 + 2].ll;
1262 env->fregs[f1].ll = x.ll.upper;
1263 env->fregs[f1 + 2].ll = x.ll.lower;
1264 return set_cc_nz_f128(x.q);
1267 /* load complement of 32-bit float */
1268 uint32_t HELPER(lcebr)(uint32_t f1, uint32_t f2)
1270 env->fregs[f1].l.upper = float32_chs(env->fregs[f2].l.upper);
1272 return set_cc_nz_f32(env->fregs[f1].l.upper);
1275 /* load complement of 64-bit float */
1276 uint32_t HELPER(lcdbr)(uint32_t f1, uint32_t f2)
1278 env->fregs[f1].d = float64_chs(env->fregs[f2].d);
1280 return set_cc_nz_f64(env->fregs[f1].d);
1283 /* load complement of 128-bit float */
1284 uint32_t HELPER(lcxbr)(uint32_t f1, uint32_t f2)
1286 CPU_QuadU x1, x2;
1287 x2.ll.upper = env->fregs[f2].ll;
1288 x2.ll.lower = env->fregs[f2 + 2].ll;
1289 x1.q = float128_chs(x2.q);
1290 env->fregs[f1].ll = x1.ll.upper;
1291 env->fregs[f1 + 2].ll = x1.ll.lower;
1292 return set_cc_nz_f128(x1.q);
1295 /* 32-bit FP addition RM */
1296 void HELPER(aeb)(uint32_t f1, uint32_t val)
1298 float32 v1 = env->fregs[f1].l.upper;
1299 CPU_FloatU v2;
1300 v2.l = val;
1301 HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__,
1302 v1, f1, v2.f);
1303 env->fregs[f1].l.upper = float32_add(v1, v2.f, &env->fpu_status);
1306 /* 32-bit FP division RM */
1307 void HELPER(deb)(uint32_t f1, uint32_t val)
1309 float32 v1 = env->fregs[f1].l.upper;
1310 CPU_FloatU v2;
1311 v2.l = val;
1312 HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__,
1313 v1, f1, v2.f);
1314 env->fregs[f1].l.upper = float32_div(v1, v2.f, &env->fpu_status);
1317 /* 32-bit FP multiplication RM */
1318 void HELPER(meeb)(uint32_t f1, uint32_t val)
1320 float32 v1 = env->fregs[f1].l.upper;
1321 CPU_FloatU v2;
1322 v2.l = val;
1323 HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__,
1324 v1, f1, v2.f);
1325 env->fregs[f1].l.upper = float32_mul(v1, v2.f, &env->fpu_status);
1328 /* 32-bit FP compare RR */
1329 uint32_t HELPER(cebr)(uint32_t f1, uint32_t f2)
1331 float32 v1 = env->fregs[f1].l.upper;
1332     float32 v2 = env->fregs[f2].l.upper;
1333 HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__,
1334 v1, f1, v2);
1335 return set_cc_f32(v1, v2);
1338 /* 64-bit FP compare RR */
1339 uint32_t HELPER(cdbr)(uint32_t f1, uint32_t f2)
1341 float64 v1 = env->fregs[f1].d;
1342     float64 v2 = env->fregs[f2].d;
1343 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__,
1344 v1, f1, v2);
1345 return set_cc_f64(v1, v2);
1348 /* 128-bit FP compare RR */
1349 uint32_t HELPER(cxbr)(uint32_t f1, uint32_t f2)
1351 CPU_QuadU v1;
1352 v1.ll.upper = env->fregs[f1].ll;
1353 v1.ll.lower = env->fregs[f1 + 2].ll;
1354 CPU_QuadU v2;
1355 v2.ll.upper = env->fregs[f2].ll;
1356 v2.ll.lower = env->fregs[f2 + 2].ll;
1358 return float_comp_to_cc(float128_compare_quiet(v1.q, v2.q,
1359 &env->fpu_status));
1362 /* 64-bit FP compare RM */
1363 uint32_t HELPER(cdb)(uint32_t f1, uint64_t a2)
1365 float64 v1 = env->fregs[f1].d;
1366 CPU_DoubleU v2;
1367 v2.ll = ldq(a2);
1368 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__, v1,
1369 f1, v2.d);
1370 return set_cc_f64(v1, v2.d);
1373 /* 64-bit FP addition RM */
1374 uint32_t HELPER(adb)(uint32_t f1, uint64_t a2)
1376 float64 v1 = env->fregs[f1].d;
1377 CPU_DoubleU v2;
1378 v2.ll = ldq(a2);
1379 HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__,
1380 v1, f1, v2.d);
1381 env->fregs[f1].d = v1 = float64_add(v1, v2.d, &env->fpu_status);
1382 return set_cc_nz_f64(v1);
1385 /* 32-bit FP subtraction RM */
1386 void HELPER(seb)(uint32_t f1, uint32_t val)
1388 float32 v1 = env->fregs[f1].l.upper;
1389 CPU_FloatU v2;
1390 v2.l = val;
1391 env->fregs[f1].l.upper = float32_sub(v1, v2.f, &env->fpu_status);
1394 /* 64-bit FP subtraction RM */
1395 uint32_t HELPER(sdb)(uint32_t f1, uint64_t a2)
1397 float64 v1 = env->fregs[f1].d;
1398 CPU_DoubleU v2;
1399 v2.ll = ldq(a2);
1400 env->fregs[f1].d = v1 = float64_sub(v1, v2.d, &env->fpu_status);
1401 return set_cc_nz_f64(v1);
1404 /* 64-bit FP multiplication RM */
1405 void HELPER(mdb)(uint32_t f1, uint64_t a2)
1407 float64 v1 = env->fregs[f1].d;
1408 CPU_DoubleU v2;
1409 v2.ll = ldq(a2);
1410 HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__,
1411 v1, f1, v2.d);
1412 env->fregs[f1].d = float64_mul(v1, v2.d, &env->fpu_status);
1415 /* 64-bit FP division RM */
1416 void HELPER(ddb)(uint32_t f1, uint64_t a2)
1418 float64 v1 = env->fregs[f1].d;
1419 CPU_DoubleU v2;
1420 v2.ll = ldq(a2);
1421 HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__,
1422 v1, f1, v2.d);
1423 env->fregs[f1].d = float64_div(v1, v2.d, &env->fpu_status);
1426 static void set_round_mode(int m3)
1428 switch (m3) {
1429 case 0:
1430 /* current mode */
1431 break;
1432 case 1:
1433         /* biased round to nearest */
1434 case 4:
1435 /* round to nearest */
1436 set_float_rounding_mode(float_round_nearest_even, &env->fpu_status);
1437 break;
1438 case 5:
1439 /* round to zero */
1440 set_float_rounding_mode(float_round_to_zero, &env->fpu_status);
1441 break;
1442 case 6:
1443 /* round to +inf */
1444 set_float_rounding_mode(float_round_up, &env->fpu_status);
1445 break;
1446 case 7:
1447 /* round to -inf */
1448 set_float_rounding_mode(float_round_down, &env->fpu_status);
1449 break;
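/* Note that set_round_mode() changes env->fpu_status globally and the
 * conversion helpers below never restore the previous rounding mode;
 * m3 = 0 ("current mode") therefore means whatever the last caller
 * left behind. */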
1453 /* convert 32-bit float to 64-bit int */
1454 uint32_t HELPER(cgebr)(uint32_t r1, uint32_t f2, uint32_t m3)
1456 float32 v2 = env->fregs[f2].l.upper;
1457 set_round_mode(m3);
1458 env->regs[r1] = float32_to_int64(v2, &env->fpu_status);
1459 return set_cc_nz_f32(v2);
1462 /* convert 64-bit float to 64-bit int */
1463 uint32_t HELPER(cgdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1465 float64 v2 = env->fregs[f2].d;
1466 set_round_mode(m3);
1467 env->regs[r1] = float64_to_int64(v2, &env->fpu_status);
1468 return set_cc_nz_f64(v2);
1471 /* convert 128-bit float to 64-bit int */
1472 uint32_t HELPER(cgxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1474 CPU_QuadU v2;
1475 v2.ll.upper = env->fregs[f2].ll;
1476 v2.ll.lower = env->fregs[f2 + 2].ll;
1477 set_round_mode(m3);
1478 env->regs[r1] = float128_to_int64(v2.q, &env->fpu_status);
1479 if (float128_is_any_nan(v2.q)) {
1480 return 3;
1481 } else if (float128_is_zero(v2.q)) {
1482 return 0;
1483 } else if (float128_is_neg(v2.q)) {
1484 return 1;
1485 } else {
1486 return 2;
1490 /* convert 32-bit float to 32-bit int */
1491 uint32_t HELPER(cfebr)(uint32_t r1, uint32_t f2, uint32_t m3)
1493 float32 v2 = env->fregs[f2].l.upper;
1494 set_round_mode(m3);
1495 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1496 float32_to_int32(v2, &env->fpu_status);
1497 return set_cc_nz_f32(v2);
1500 /* convert 64-bit float to 32-bit int */
1501 uint32_t HELPER(cfdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1503 float64 v2 = env->fregs[f2].d;
1504 set_round_mode(m3);
1505 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1506 float64_to_int32(v2, &env->fpu_status);
1507 return set_cc_nz_f64(v2);
1510 /* convert 128-bit float to 32-bit int */
1511 uint32_t HELPER(cfxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1513 CPU_QuadU v2;
1514 v2.ll.upper = env->fregs[f2].ll;
1515 v2.ll.lower = env->fregs[f2 + 2].ll;
1516 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1517 float128_to_int32(v2.q, &env->fpu_status);
1518 return set_cc_nz_f128(v2.q);
1521 /* load 32-bit FP zero */
1522 void HELPER(lzer)(uint32_t f1)
1524 env->fregs[f1].l.upper = float32_zero;
1527 /* load 64-bit FP zero */
1528 void HELPER(lzdr)(uint32_t f1)
1530 env->fregs[f1].d = float64_zero;
1533 /* load 128-bit FP zero */
1534 void HELPER(lzxr)(uint32_t f1)
1536 CPU_QuadU x;
1537 x.q = float64_to_float128(float64_zero, &env->fpu_status);
1538 env->fregs[f1].ll = x.ll.upper;
1539     env->fregs[f1 + 2].ll = x.ll.lower; /* low half goes to f1 + 2, as in lxdbr and friends */
1542 /* 128-bit FP subtraction RR */
1543 uint32_t HELPER(sxbr)(uint32_t f1, uint32_t f2)
1545 CPU_QuadU v1;
1546 v1.ll.upper = env->fregs[f1].ll;
1547 v1.ll.lower = env->fregs[f1 + 2].ll;
1548 CPU_QuadU v2;
1549 v2.ll.upper = env->fregs[f2].ll;
1550 v2.ll.lower = env->fregs[f2 + 2].ll;
1551 CPU_QuadU res;
1552 res.q = float128_sub(v1.q, v2.q, &env->fpu_status);
1553 env->fregs[f1].ll = res.ll.upper;
1554 env->fregs[f1 + 2].ll = res.ll.lower;
1555 return set_cc_nz_f128(res.q);
1558 /* 128-bit FP addition RR */
1559 uint32_t HELPER(axbr)(uint32_t f1, uint32_t f2)
1561 CPU_QuadU v1;
1562 v1.ll.upper = env->fregs[f1].ll;
1563 v1.ll.lower = env->fregs[f1 + 2].ll;
1564 CPU_QuadU v2;
1565 v2.ll.upper = env->fregs[f2].ll;
1566 v2.ll.lower = env->fregs[f2 + 2].ll;
1567 CPU_QuadU res;
1568 res.q = float128_add(v1.q, v2.q, &env->fpu_status);
1569 env->fregs[f1].ll = res.ll.upper;
1570 env->fregs[f1 + 2].ll = res.ll.lower;
1571 return set_cc_nz_f128(res.q);
1574 /* 32-bit FP multiplication RR */
1575 void HELPER(meebr)(uint32_t f1, uint32_t f2)
1577 env->fregs[f1].l.upper = float32_mul(env->fregs[f1].l.upper,
1578 env->fregs[f2].l.upper,
1579 &env->fpu_status);
1582 /* 64-bit FP division RR */
1583 void HELPER(ddbr)(uint32_t f1, uint32_t f2)
1585 env->fregs[f1].d = float64_div(env->fregs[f1].d, env->fregs[f2].d,
1586 &env->fpu_status);
1589 /* 64-bit FP multiply and add RM */
1590 void HELPER(madb)(uint32_t f1, uint64_t a2, uint32_t f3)
1592 HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__, f1, a2, f3);
1593 CPU_DoubleU v2;
1594 v2.ll = ldq(a2);
1595 env->fregs[f1].d = float64_add(env->fregs[f1].d,
1596 float64_mul(v2.d, env->fregs[f3].d,
1597 &env->fpu_status),
1598 &env->fpu_status);
1601 /* 64-bit FP multiply and add RR */
1602 void HELPER(madbr)(uint32_t f1, uint32_t f3, uint32_t f2)
1604 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
1605 env->fregs[f1].d = float64_add(float64_mul(env->fregs[f2].d,
1606 env->fregs[f3].d,
1607 &env->fpu_status),
1608 env->fregs[f1].d, &env->fpu_status);
1611 /* 64-bit FP multiply and subtract RR */
1612 void HELPER(msdbr)(uint32_t f1, uint32_t f3, uint32_t f2)
1614 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
1615 env->fregs[f1].d = float64_sub(float64_mul(env->fregs[f2].d,
1616 env->fregs[f3].d,
1617 &env->fpu_status),
1618 env->fregs[f1].d, &env->fpu_status);
1621 /* 32-bit FP multiply and add RR */
1622 void HELPER(maebr)(uint32_t f1, uint32_t f3, uint32_t f2)
1624 env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
1625 float32_mul(env->fregs[f2].l.upper,
1626 env->fregs[f3].l.upper,
1627 &env->fpu_status),
1628 &env->fpu_status);
1631 /* convert 64-bit float to 128-bit float */
1632 void HELPER(lxdb)(uint32_t f1, uint64_t a2)
1634 CPU_DoubleU v2;
1635 v2.ll = ldq(a2);
1636 CPU_QuadU v1;
1637 v1.q = float64_to_float128(v2.d, &env->fpu_status);
1638 env->fregs[f1].ll = v1.ll.upper;
1639 env->fregs[f1 + 2].ll = v1.ll.lower;
1642 /* test data class 32-bit */
1643 uint32_t HELPER(tceb)(uint32_t f1, uint64_t m2)
1645 float32 v1 = env->fregs[f1].l.upper;
1646 int neg = float32_is_neg(v1);
1647 uint32_t cc = 0;
1649 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, (long)v1, m2, neg);
1650 if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
1651 (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
1652 (float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
1653 (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
1654 cc = 1;
1655 } else if (m2 & (1 << (9-neg))) {
1656 /* assume normalized number */
1657 cc = 1;
1660 /* FIXME: denormalized? */
1661 return cc;
1664 /* test data class 64-bit */
1665 uint32_t HELPER(tcdb)(uint32_t f1, uint64_t m2)
1667 float64 v1 = env->fregs[f1].d;
1668 int neg = float64_is_neg(v1);
1669 uint32_t cc = 0;
1671 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, v1, m2, neg);
1672 if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
1673 (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
1674 (float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
1675 (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
1676 cc = 1;
1677 } else if (m2 & (1 << (9-neg))) {
1678 /* assume normalized number */
1679 cc = 1;
1681 /* FIXME: denormalized? */
1682 return cc;
1685 /* test data class 128-bit */
1686 uint32_t HELPER(tcxb)(uint32_t f1, uint64_t m2)
1688 CPU_QuadU v1;
1689 uint32_t cc = 0;
1690 v1.ll.upper = env->fregs[f1].ll;
1691 v1.ll.lower = env->fregs[f1 + 2].ll;
1693 int neg = float128_is_neg(v1.q);
1694 if ((float128_is_zero(v1.q) && (m2 & (1 << (11-neg)))) ||
1695 (float128_is_infinity(v1.q) && (m2 & (1 << (5-neg)))) ||
1696 (float128_is_any_nan(v1.q) && (m2 & (1 << (3-neg)))) ||
1697 (float128_is_signaling_nan(v1.q) && (m2 & (1 << (1-neg))))) {
1698 cc = 1;
1699 } else if (m2 & (1 << (9-neg))) {
1700 /* assume normalized number */
1701 cc = 1;
1703 /* FIXME: denormalized? */
1704 return cc;
1707 /* find leftmost one */
1708 uint32_t HELPER(flogr)(uint32_t r1, uint64_t v2)
1710 uint64_t res = 0;
1711 uint64_t ov2 = v2;
1713 while (!(v2 & 0x8000000000000000ULL) && v2) {
1714 v2 <<= 1;
1715 res++;
1718 if (!v2) {
1719 env->regs[r1] = 64;
1720 env->regs[r1 + 1] = 0;
1721 return 0;
1722 } else {
1723 env->regs[r1] = res;
1724 env->regs[r1 + 1] = ov2 & ~(0x8000000000000000ULL >> res);
1725 return 2;
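/* FLOGR example: v2 = 0x0000000100000000 has its leftmost one bit 31
 * positions from the left, so r1 is set to 31, r1+1 receives the input
 * with that bit cleared (0 here) and the helper returns cc 2; a zero
 * input yields r1 = 64, r1+1 = 0 and cc 0. */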
1729 /* square root 64-bit RR */
1730 void HELPER(sqdbr)(uint32_t f1, uint32_t f2)
1732 env->fregs[f1].d = float64_sqrt(env->fregs[f2].d, &env->fpu_status);
1735 /* checksum */
1736 void HELPER(cksm)(uint32_t r1, uint32_t r2)
1738 uint64_t src = get_address_31fix(r2);
1739 uint64_t src_len = env->regs[(r2 + 1) & 15];
1740 uint64_t cksm = (uint32_t)env->regs[r1];
1742 while (src_len >= 4) {
1743 cksm += ldl(src);
1745 /* move to next word */
1746 src_len -= 4;
1747 src += 4;
1750 switch (src_len) {
1751 case 0:
1752 break;
1753 case 1:
1754 cksm += ldub(src) << 24;
1755 break;
1756 case 2:
1757 cksm += lduw(src) << 16;
1758 break;
1759 case 3:
1760 cksm += lduw(src) << 16;
1761 cksm += ldub(src + 2) << 8;
1762 break;
1765 /* indicate we've processed everything */
1766 env->regs[r2] = src + src_len;
1767 env->regs[(r2 + 1) & 15] = 0;
1769 /* store result */
1770 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1771 ((uint32_t)cksm + (cksm >> 32));
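/* CKSM above accumulates the operand as big-endian 32-bit words (the
 * trailing 1-3 bytes are padded with zeros on the right), then folds
 * the carries that piled up in the upper half of the 64-bit accumulator
 * back into the low 32 bits via "(uint32_t)cksm + (cksm >> 32)" before
 * storing the result into the low half of r1. */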
1774 static inline uint32_t cc_calc_ltgt_32(CPUState *env, int32_t src,
1775 int32_t dst)
1777 if (src == dst) {
1778 return 0;
1779 } else if (src < dst) {
1780 return 1;
1781 } else {
1782 return 2;
1786 static inline uint32_t cc_calc_ltgt0_32(CPUState *env, int32_t dst)
1788 return cc_calc_ltgt_32(env, dst, 0);
1791 static inline uint32_t cc_calc_ltgt_64(CPUState *env, int64_t src,
1792 int64_t dst)
1794 if (src == dst) {
1795 return 0;
1796 } else if (src < dst) {
1797 return 1;
1798 } else {
1799 return 2;
1803 static inline uint32_t cc_calc_ltgt0_64(CPUState *env, int64_t dst)
1805 return cc_calc_ltgt_64(env, dst, 0);
1808 static inline uint32_t cc_calc_ltugtu_32(CPUState *env, uint32_t src,
1809 uint32_t dst)
1811 if (src == dst) {
1812 return 0;
1813 } else if (src < dst) {
1814 return 1;
1815 } else {
1816 return 2;
1820 static inline uint32_t cc_calc_ltugtu_64(CPUState *env, uint64_t src,
1821 uint64_t dst)
1823 if (src == dst) {
1824 return 0;
1825 } else if (src < dst) {
1826 return 1;
1827 } else {
1828 return 2;
1832 static inline uint32_t cc_calc_tm_32(CPUState *env, uint32_t val, uint32_t mask)
1834 HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__, val, mask);
1835 uint16_t r = val & mask;
1836 if (r == 0 || mask == 0) {
1837 return 0;
1838 } else if (r == mask) {
1839 return 3;
1840 } else {
1841 return 1;
1845 /* set condition code for test under mask */
1846 static inline uint32_t cc_calc_tm_64(CPUState *env, uint64_t val, uint32_t mask)
1848 uint16_t r = val & mask;
1849 HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__, val, mask, r);
1850 if (r == 0 || mask == 0) {
1851 return 0;
1852 } else if (r == mask) {
1853 return 3;
1854 } else {
1855 while (!(mask & 0x8000)) {
1856 mask <<= 1;
1857 val <<= 1;
1859 if (val & 0x8000) {
1860 return 2;
1861 } else {
1862 return 1;
1867 static inline uint32_t cc_calc_nz(CPUState *env, uint64_t dst)
1869 return !!dst;
1872 static inline uint32_t cc_calc_add_64(CPUState *env, int64_t a1, int64_t a2,
1873 int64_t ar)
1875 if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
1876 return 3; /* overflow */
1877 } else {
1878 if (ar < 0) {
1879 return 1;
1880 } else if (ar > 0) {
1881 return 2;
1882 } else {
1883 return 0;
1888 static inline uint32_t cc_calc_addu_64(CPUState *env, uint64_t a1, uint64_t a2,
1889 uint64_t ar)
1891 if (ar == 0) {
1892 if (a1) {
1893 return 2;
1894 } else {
1895 return 0;
1897 } else {
1898 if (ar < a1 || ar < a2) {
1899 return 3;
1900 } else {
1901 return 1;
1906 static inline uint32_t cc_calc_sub_64(CPUState *env, int64_t a1, int64_t a2,
1907 int64_t ar)
1909 if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
1910 return 3; /* overflow */
1911 } else {
1912 if (ar < 0) {
1913 return 1;
1914 } else if (ar > 0) {
1915 return 2;
1916 } else {
1917 return 0;
1922 static inline uint32_t cc_calc_subu_64(CPUState *env, uint64_t a1, uint64_t a2,
1923 uint64_t ar)
1925 if (ar == 0) {
1926 return 2;
1927 } else {
1928 if (a2 > a1) {
1929 return 1;
1930 } else {
1931 return 3;
1936 static inline uint32_t cc_calc_abs_64(CPUState *env, int64_t dst)
1938 if ((uint64_t)dst == 0x8000000000000000ULL) {
1939 return 3;
1940 } else if (dst) {
1941 return 1;
1942 } else {
1943 return 0;
1947 static inline uint32_t cc_calc_nabs_64(CPUState *env, int64_t dst)
1949 return !!dst;
1952 static inline uint32_t cc_calc_comp_64(CPUState *env, int64_t dst)
1954 if ((uint64_t)dst == 0x8000000000000000ULL) {
1955 return 3;
1956 } else if (dst < 0) {
1957 return 1;
1958 } else if (dst > 0) {
1959 return 2;
1960 } else {
1961 return 0;
1966 static inline uint32_t cc_calc_add_32(CPUState *env, int32_t a1, int32_t a2,
1967 int32_t ar)
1969 if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
1970 return 3; /* overflow */
1971 } else {
1972 if (ar < 0) {
1973 return 1;
1974 } else if (ar > 0) {
1975 return 2;
1976 } else {
1977 return 0;
1982 static inline uint32_t cc_calc_addu_32(CPUState *env, uint32_t a1, uint32_t a2,
1983 uint32_t ar)
1985 if (ar == 0) {
1986 if (a1) {
1987 return 2;
1988 } else {
1989 return 0;
1991 } else {
1992 if (ar < a1 || ar < a2) {
1993 return 3;
1994 } else {
1995 return 1;
2000 static inline uint32_t cc_calc_sub_32(CPUState *env, int32_t a1, int32_t a2,
2001 int32_t ar)
2003 if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
2004 return 3; /* overflow */
2005 } else {
2006 if (ar < 0) {
2007 return 1;
2008 } else if (ar > 0) {
2009 return 2;
2010 } else {
2011 return 0;
2016 static inline uint32_t cc_calc_subu_32(CPUState *env, uint32_t a1, uint32_t a2,
2017 uint32_t ar)
2019 if (ar == 0) {
2020 return 2;
2021 } else {
2022 if (a2 > a1) {
2023 return 1;
2024 } else {
2025 return 3;
2030 static inline uint32_t cc_calc_abs_32(CPUState *env, int32_t dst)
2032 if ((uint32_t)dst == 0x80000000UL) {
2033 return 3;
2034 } else if (dst) {
2035 return 1;
2036 } else {
2037 return 0;
2041 static inline uint32_t cc_calc_nabs_32(CPUState *env, int32_t dst)
2043 return !!dst;
2046 static inline uint32_t cc_calc_comp_32(CPUState *env, int32_t dst)
2048 if ((uint32_t)dst == 0x80000000UL) {
2049 return 3;
2050 } else if (dst < 0) {
2051 return 1;
2052 } else if (dst > 0) {
2053 return 2;
2054 } else {
2055 return 0;
2059 /* calculate condition code for insert character under mask insn */
2060 static inline uint32_t cc_calc_icm_32(CPUState *env, uint32_t mask, uint32_t val)
2062 HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__, mask, val);
2063 uint32_t cc;
2065 if (mask == 0xf) {
2066 if (!val) {
2067 return 0;
2068 } else if (val & 0x80000000) {
2069 return 1;
2070 } else {
2071 return 2;
2075 if (!val || !mask) {
2076 cc = 0;
2077 } else {
2078 while (mask != 1) {
2079 mask >>= 1;
2080 val >>= 8;
2082 if (val & 0x80) {
2083 cc = 1;
2084 } else {
2085 cc = 2;
2088 return cc;
2091 static inline uint32_t cc_calc_slag(CPUState *env, uint64_t src, uint64_t shift)
2093 uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
2094 uint64_t match, r;
2096 /* check if the sign bit stays the same */
2097 if (src & (1ULL << 63)) {
2098 match = mask;
2099 } else {
2100 match = 0;
2103 if ((src & mask) != match) {
2104 /* overflow */
2105 return 3;
2108 r = ((src << shift) & ((1ULL << 63) - 1)) | (src & (1ULL << 63));
2110 if ((int64_t)r == 0) {
2111 return 0;
2112 } else if ((int64_t)r < 0) {
2113 return 1;
2116 return 2;
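/* cc_calc_slag() examples: for src = 1 and shift = 2 the two top bits
 * covered by the mask are zero and match the sign, so there is no
 * overflow and the positive result 4 gives cc 2.  For
 * src = 0x4000000000000000 and shift = 2 one masked bit differs from
 * the zero sign bit, so the helper reports overflow with cc 3. */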
2120 static inline uint32_t do_calc_cc(CPUState *env, uint32_t cc_op, uint64_t src,
2121 uint64_t dst, uint64_t vr)
2123 uint32_t r = 0;
2125 switch (cc_op) {
2126 case CC_OP_CONST0:
2127 case CC_OP_CONST1:
2128 case CC_OP_CONST2:
2129 case CC_OP_CONST3:
2130 /* cc_op value _is_ cc */
2131 r = cc_op;
2132 break;
2133 case CC_OP_LTGT0_32:
2134 r = cc_calc_ltgt0_32(env, dst);
2135 break;
2136 case CC_OP_LTGT0_64:
2137 r = cc_calc_ltgt0_64(env, dst);
2138 break;
2139 case CC_OP_LTGT_32:
2140 r = cc_calc_ltgt_32(env, src, dst);
2141 break;
2142 case CC_OP_LTGT_64:
2143 r = cc_calc_ltgt_64(env, src, dst);
2144 break;
2145 case CC_OP_LTUGTU_32:
2146 r = cc_calc_ltugtu_32(env, src, dst);
2147 break;
2148 case CC_OP_LTUGTU_64:
2149 r = cc_calc_ltugtu_64(env, src, dst);
2150 break;
2151 case CC_OP_TM_32:
2152 r = cc_calc_tm_32(env, src, dst);
2153 break;
2154 case CC_OP_TM_64:
2155 r = cc_calc_tm_64(env, src, dst);
2156 break;
2157 case CC_OP_NZ:
2158 r = cc_calc_nz(env, dst);
2159 break;
2160 case CC_OP_ADD_64:
2161 r = cc_calc_add_64(env, src, dst, vr);
2162 break;
2163 case CC_OP_ADDU_64:
2164 r = cc_calc_addu_64(env, src, dst, vr);
2165 break;
2166 case CC_OP_SUB_64:
2167 r = cc_calc_sub_64(env, src, dst, vr);
2168 break;
2169 case CC_OP_SUBU_64:
2170 r = cc_calc_subu_64(env, src, dst, vr);
2171 break;
2172 case CC_OP_ABS_64:
2173 r = cc_calc_abs_64(env, dst);
2174 break;
2175 case CC_OP_NABS_64:
2176 r = cc_calc_nabs_64(env, dst);
2177 break;
2178 case CC_OP_COMP_64:
2179 r = cc_calc_comp_64(env, dst);
2180 break;
2182 case CC_OP_ADD_32:
2183 r = cc_calc_add_32(env, src, dst, vr);
2184 break;
2185 case CC_OP_ADDU_32:
2186 r = cc_calc_addu_32(env, src, dst, vr);
2187 break;
2188 case CC_OP_SUB_32:
2189 r = cc_calc_sub_32(env, src, dst, vr);
2190 break;
2191 case CC_OP_SUBU_32:
2192 r = cc_calc_subu_32(env, src, dst, vr);
2193 break;
2194 case CC_OP_ABS_32:
2195 r = cc_calc_abs_64(env, dst);
2196 break;
2197 case CC_OP_NABS_32:
2198 r = cc_calc_nabs_64(env, dst);
2199 break;
2200 case CC_OP_COMP_32:
2201 r = cc_calc_comp_32(env, dst);
2202 break;
2204 case CC_OP_ICM:
2205 r = cc_calc_icm_32(env, src, dst);
2206 break;
2207 case CC_OP_SLAG:
2208 r = cc_calc_slag(env, src, dst);
2209 break;
2211 case CC_OP_LTGT_F32:
2212 r = set_cc_f32(src, dst);
2213 break;
2214 case CC_OP_LTGT_F64:
2215 r = set_cc_f64(src, dst);
2216 break;
2217 case CC_OP_NZ_F32:
2218 r = set_cc_nz_f32(dst);
2219 break;
2220 case CC_OP_NZ_F64:
2221 r = set_cc_nz_f64(dst);
2222 break;
2224 default:
2225 cpu_abort(env, "Unknown CC operation: %s\n", cc_name(cc_op));
2228 HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__,
2229 cc_name(cc_op), src, dst, vr, r);
2230 return r;
2233 uint32_t calc_cc(CPUState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
2234 uint64_t vr)
2236 return do_calc_cc(env, cc_op, src, dst, vr);
2239 uint32_t HELPER(calc_cc)(uint32_t cc_op, uint64_t src, uint64_t dst,
2240 uint64_t vr)
2242 return do_calc_cc(env, cc_op, src, dst, vr);
2245 uint64_t HELPER(cvd)(int32_t bin)
2247 /* positive 0 */
2248 uint64_t dec = 0x0c;
2249 int shift = 4;
2251 if (bin < 0) {
2252 bin = -bin;
2253 dec = 0x0d;
2256 for (shift = 4; (shift < 64) && bin; shift += 4) {
2257 int current_number = bin % 10;
2259 dec |= (current_number) << shift;
2260 bin /= 10;
2263 return dec;
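/* CVD example: bin = -123 sets the sign nibble to 0xd and packs the
 * digits right to left, producing 0x000000000000123d; a positive 123
 * ends in the 0xc sign nibble instead, giving 0x000000000000123c. */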
2266 void HELPER(unpk)(uint32_t len, uint64_t dest, uint64_t src)
2268 int len_dest = len >> 4;
2269 int len_src = len & 0xf;
2270 uint8_t b;
2271 int second_nibble = 0;
2273 dest += len_dest;
2274 src += len_src;
2276 /* last byte is special, it only flips the nibbles */
2277 b = ldub(src);
2278 stb(dest, (b << 4) | (b >> 4));
2279 src--;
2280 len_src--;
2282 /* now pad every nibble with 0xf0 */
2284 while (len_dest > 0) {
2285 uint8_t cur_byte = 0;
2287 if (len_src > 0) {
2288 cur_byte = ldub(src);
2291 len_dest--;
2292 dest--;
2294 /* only advance one nibble at a time */
2295 if (second_nibble) {
2296 cur_byte >>= 4;
2297 len_src--;
2298 src--;
2300 second_nibble = !second_nibble;
2302 /* digit */
2303 cur_byte = (cur_byte & 0xf);
2304 /* zone bits */
2305 cur_byte |= 0xf0;
2307 stb(dest, cur_byte);
2311 void HELPER(tr)(uint32_t len, uint64_t array, uint64_t trans)
2313 int i;
2315 for (i = 0; i <= len; i++) {
2316 uint8_t byte = ldub(array + i);
2317 uint8_t new_byte = ldub(trans + byte);
2318 stb(array + i, new_byte);
2322 #ifndef CONFIG_USER_ONLY
2324 void HELPER(load_psw)(uint64_t mask, uint64_t addr)
2326 load_psw(env, mask, addr);
2327 cpu_loop_exit();
2330 static void program_interrupt(CPUState *env, uint32_t code, int ilc)
2332 qemu_log("program interrupt at %#" PRIx64 "\n", env->psw.addr);
2334 if (kvm_enabled()) {
2335 kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code);
2336 } else {
2337 env->int_pgm_code = code;
2338 env->int_pgm_ilc = ilc;
2339 env->exception_index = EXCP_PGM;
2340 cpu_loop_exit();
2344 static void ext_interrupt(CPUState *env, int type, uint32_t param,
2345 uint64_t param64)
2347 cpu_inject_ext(env, type, param, param64);
2350 int sclp_service_call(CPUState *env, uint32_t sccb, uint64_t code)
2352 int r = 0;
2353 int shift = 0;
2355 #ifdef DEBUG_HELPER
2356 printf("sclp(0x%x, 0x%" PRIx64 ")\n", sccb, code);
2357 #endif
2359 if (sccb & ~0x7ffffff8ul) {
2360 fprintf(stderr, "KVM: invalid sccb address 0x%x\n", sccb);
2361 r = -1;
2362 goto out;
2365 switch(code) {
2366 case SCLP_CMDW_READ_SCP_INFO:
2367 case SCLP_CMDW_READ_SCP_INFO_FORCED:
2368 while ((ram_size >> (20 + shift)) > 65535) {
2369 shift++;
2371 stw_phys(sccb + SCP_MEM_CODE, ram_size >> (20 + shift));
2372 stb_phys(sccb + SCP_INCREMENT, 1 << shift);
2373 stw_phys(sccb + SCP_RESPONSE_CODE, 0x10);
2375 if (kvm_enabled()) {
2376 #ifdef CONFIG_KVM
2377 kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE,
2378 sccb & ~3, 0, 1);
2379 #endif
2380 } else {
2381 env->psw.addr += 4;
2382 ext_interrupt(env, EXT_SERVICE, sccb & ~3, 0);
2384 break;
2385 default:
2386 #ifdef DEBUG_HELPER
2387 printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64 "x\n", sccb, code);
2388 #endif
2389 r = -1;
2390 break;
2393 out:
2394 return r;
/* SCLP service call */
uint32_t HELPER(servc)(uint32_t r1, uint64_t r2)
{
    if (sclp_service_call(env, r1, r2)) {
        return 3;
    }

    return 0;
}

/* DIAG */
uint64_t HELPER(diag)(uint32_t num, uint64_t mem, uint64_t code)
{
    uint64_t r;

    switch (num) {
    case 0x500:
        /* KVM hypercall */
        r = s390_virtio_hypercall(env, mem, code);
        break;
    case 0x44:
        /* yield */
        r = 0;
        break;
    case 0x308:
        /* ipl */
        r = 0;
        break;
    default:
        r = -1;
        break;
    }

    if (r) {
        program_interrupt(env, PGM_OPERATION, ILC_LATER_INC);
    }

    return r;
}

/* Store CPU ID */
void HELPER(stidp)(uint64_t a1)
{
    stq(a1, env->cpu_num);
}

/* Set Prefix */
void HELPER(spx)(uint64_t a1)
{
    uint32_t prefix;

    prefix = ldl(a1);
    env->psa = prefix & 0xfffff000;
    qemu_log("prefix: %#x\n", prefix);
    tlb_flush_page(env, 0);
    tlb_flush_page(env, TARGET_PAGE_SIZE);
}

/* Set Clock */
uint32_t HELPER(sck)(uint64_t a1)
{
    /* XXX not implemented - is it necessary? */

    return 0;
}

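/*
 * The z/Architecture TOD clock increments bit 51 every microsecond, so one
 * unit of bit 63 corresponds to 1/4096 microsecond.  clock_value() below
 * builds the guest TOD value from the stored offset plus the host
 * nanoseconds elapsed since tod_basetime, converted into TOD units by
 * time2tod().
 */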
static inline uint64_t clock_value(CPUState *env)
{
    uint64_t time;

    time = env->tod_offset +
           time2tod(qemu_get_clock_ns(vm_clock) - env->tod_basetime);

    return time;
}

/* Store Clock */
uint32_t HELPER(stck)(uint64_t a1)
{
    stq(a1, clock_value(env));

    return 0;
}

/* Store Clock Extended */
uint32_t HELPER(stcke)(uint64_t a1)
{
    stb(a1, 0);
    /* basically the same value as stck */
    stq(a1 + 1, clock_value(env) | env->cpu_num);
    /* more fine grained than stck */
    stq(a1 + 9, 0);
    /* XXX programmable fields */
    stw(a1 + 17, 0);

    return 0;
}

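/*
 * SET CLOCK COMPARATOR and SET CPU TIMER below convert TOD-clock units into
 * nanoseconds for qemu_mod_timer(): one TOD unit is 1000/4096 ns =
 * 125/512 ns, hence the (time * 125) >> 9.
 */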
/* Set Clock Comparator */
void HELPER(sckc)(uint64_t a1)
{
    uint64_t time = ldq(a1);

    if (time == -1ULL) {
        return;
    }

    /* difference between now and then */
    time -= clock_value(env);
    /* nanoseconds */
    time = (time * 125) >> 9;

    qemu_mod_timer(env->tod_timer, qemu_get_clock_ns(vm_clock) + time);
}

/* Store Clock Comparator */
void HELPER(stckc)(uint64_t a1)
{
    /* XXX implement */
    stq(a1, 0);
}

/* Set CPU Timer */
void HELPER(spt)(uint64_t a1)
{
    uint64_t time = ldq(a1);

    if (time == -1ULL) {
        return;
    }

    /* nanoseconds */
    time = (time * 125) >> 9;

    qemu_mod_timer(env->cpu_timer, qemu_get_clock_ns(vm_clock) + time);
}

/* Store CPU Timer */
void HELPER(stpt)(uint64_t a1)
{
    /* XXX implement */
    stq(a1, 0);
}

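/*
 * STORE SYSTEM INFORMATION: the function code (level) and selector 1 come
 * from r0, selector 2 from r1, and the matching SYSIB is copied to the real
 * address in a0.  Unsupported level/selector combinations set cc 3; the
 * STSI_LEVEL_CURRENT function code just reports the current level (3) in
 * r0.
 */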
/* Store System Information */
uint32_t HELPER(stsi)(uint64_t a0, uint32_t r0, uint32_t r1)
{
    int cc = 0;
    int sel1, sel2;

    if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
        ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
        /* valid function code, invalid reserved bits */
        program_interrupt(env, PGM_SPECIFICATION, 2);
    }

    sel1 = r0 & STSI_R0_SEL1_MASK;
    sel2 = r1 & STSI_R1_SEL2_MASK;

    /* XXX: spec exception if sysib is not 4k-aligned */

    switch (r0 & STSI_LEVEL_MASK) {
    case STSI_LEVEL_1:
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            struct sysib_111 sysib;

            memset(&sysib, 0, sizeof(sysib));
            ebcdic_put(sysib.manuf, "QEMU            ", 16);
            /* same as machine type number in STORE CPU ID */
            ebcdic_put(sysib.type, "QEMU", 4);
            /* same as model number in STORE CPU ID */
            ebcdic_put(sysib.model, "QEMU            ", 16);
            ebcdic_put(sysib.sequence, "QEMU            ", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            struct sysib_121 sysib;

            memset(&sysib, 0, sizeof(sysib));
            /* XXX make different for different CPUs? */
            ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            stw_p(&sysib.cpu_addr, env->cpu_num);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            struct sysib_122 sysib;

            memset(&sysib, 0, sizeof(sysib));
            stl_p(&sysib.capability, 0x443afc29);
            /* XXX change when SMP comes */
            stw_p(&sysib.total_cpus, 1);
            stw_p(&sysib.active_cpus, 1);
            stw_p(&sysib.standby_cpus, 0);
            stw_p(&sysib.reserved_cpus, 0);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
        } else {
            cc = 3;
        }
        break;
    case STSI_LEVEL_2:
        if ((sel1 == 2) && (sel2 == 1)) {
            /* LPAR CPU */
            struct sysib_221 sysib;

            memset(&sysib, 0, sizeof(sysib));
            /* XXX make different for different CPUs? */
            ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.plant, "QEMU", 4);
            stw_p(&sysib.cpu_addr, env->cpu_num);
            stw_p(&sysib.cpu_id, 0);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* LPAR CPUs */
            struct sysib_222 sysib;

            memset(&sysib, 0, sizeof(sysib));
            stw_p(&sysib.lpar_num, 0);
            sysib.lcpuc = 0;
            /* XXX change when SMP comes */
            stw_p(&sysib.total_cpus, 1);
            stw_p(&sysib.conf_cpus, 1);
            stw_p(&sysib.standby_cpus, 0);
            stw_p(&sysib.reserved_cpus, 0);
            ebcdic_put(sysib.name, "QEMU    ", 8);
            stl_p(&sysib.caf, 1000);
            stw_p(&sysib.dedicated_cpus, 0);
            stw_p(&sysib.shared_cpus, 0);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
        } else {
            cc = 3;
        }
        break;
    case STSI_LEVEL_3:
        if ((sel1 == 2) && (sel2 == 2)) {
            /* VM CPUs */
            struct sysib_322 sysib;

            memset(&sysib, 0, sizeof(sysib));
            sysib.count = 1;
            /* XXX change when SMP comes */
            stw_p(&sysib.vm[0].total_cpus, 1);
            stw_p(&sysib.vm[0].conf_cpus, 1);
            stw_p(&sysib.vm[0].standby_cpus, 0);
            stw_p(&sysib.vm[0].reserved_cpus, 0);
            ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
            stl_p(&sysib.vm[0].caf, 1000);
            ebcdic_put(sysib.vm[0].cpi, "KVM/Linux       ", 16);
            cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
        } else {
            cc = 3;
        }
        break;
    case STSI_LEVEL_CURRENT:
        env->regs[0] = STSI_LEVEL_3;
        break;
    default:
        cc = 3;
        break;
    }

    return cc;
}

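/*
 * LCTLG/LCTL load the control registers r1 through r3, wrapping from 15
 * back to 0; LCTLG loads full 64-bit values while LCTL only replaces the
 * low 32 bits.  The TLB is flushed afterwards because control registers
 * carry translation state (e.g. the primary ASCE in CR1).
 */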
void HELPER(lctlg)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t src = a2;

    for (i = r1;; i = (i + 1) % 16) {
        env->cregs[i] = ldq(src);
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, env->cregs[i]);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    tlb_flush(env, 1);
}

void HELPER(lctl)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t src = a2;

    for (i = r1;; i = (i + 1) % 16) {
        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | ldl(src);
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    tlb_flush(env, 1);
}

void HELPER(stctg)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        stq(dest, env->cregs[i]);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}

void HELPER(stctl)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        stl(dest, env->cregs[i]);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}

uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
{
    /* XXX implement */

    return 0;
}

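/*
 * Storage keys are kept as one byte per TARGET_PAGE_SIZE frame in
 * env->storage_keys; ISKE reads that byte and SSKE overwrites it.  RRBE
 * should additionally reset the reference bit and return the old
 * reference/change state in the cc, which is still unimplemented below.
 */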
/* insert storage key extended */
uint64_t HELPER(iske)(uint64_t r2)
{
    uint64_t addr = get_address(0, 0, r2);

    if (addr > ram_size) {
        return 0;
    }

    /* XXX maybe use qemu's internal keys? */
    return env->storage_keys[addr / TARGET_PAGE_SIZE];
}

/* set storage key extended */
void HELPER(sske)(uint32_t r1, uint64_t r2)
{
    uint64_t addr = get_address(0, 0, r2);

    if (addr > ram_size) {
        return;
    }

    env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
}

/* reset reference bit extended */
uint32_t HELPER(rrbe)(uint32_t r1, uint64_t r2)
{
    if (r2 > ram_size) {
        return 0;
    }

    /* XXX implement */
#if 0
    env->storage_keys[r2 / TARGET_PAGE_SIZE] &= ~SK_REFERENCED;
#endif

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */
    return 0;
}

/* compare and swap and purge */
uint32_t HELPER(csp)(uint32_t r1, uint32_t r2)
{
    uint32_t cc;
    uint32_t o1 = env->regs[r1];
    uint64_t a2 = get_address_31fix(r2) & ~3ULL;
    uint32_t o2 = ldl(a2);

    if (o1 == o2) {
        stl(a2, env->regs[(r1 + 1) & 15]);
        if (env->regs[r2] & 0x3) {
            /* flush TLB / ALB */
            tlb_flush(env, 1);
        }
        cc = 0;
    } else {
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
        cc = 1;
    }

    return cc;
}

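/*
 * MVCS/MVCP move at most 256 bytes between the primary and secondary
 * address spaces; longer requests are truncated and flagged with cc 3.
 * mvc_asc() translates both operands with the requested ASC modes and
 * copies byte by byte, recursing once an operand crosses a page boundary
 * so the translation is redone for the new page.
 */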
static uint32_t mvc_asc(int64_t l, uint64_t a1, uint64_t mode1, uint64_t a2,
                        uint64_t mode2)
{
    target_ulong src, dest;
    int flags, cc = 0, i;

    if (!l) {
        return 0;
    } else if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
        cpu_loop_exit();
    }
    dest |= a1 & ~TARGET_PAGE_MASK;

    if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
        cpu_loop_exit();
    }
    src |= a2 & ~TARGET_PAGE_MASK;

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        /* XXX be more clever */
        if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
            (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
            mvc_asc(l - i, a1 + i, mode1, a2 + i, mode2);
            break;
        }
        stb_phys(dest + i, ldub_phys(src + i));
    }

    return cc;
}

uint32_t HELPER(mvcs)(uint64_t l, uint64_t a1, uint64_t a2)
{
    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __FUNCTION__, l, a1, a2);

    return mvc_asc(l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
}

uint32_t HELPER(mvcp)(uint64_t l, uint64_t a1, uint64_t a2)
{
    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __FUNCTION__, l, a1, a2);

    return mvc_asc(l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
}

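/*
 * SIGP only handles the SENSE and SET ARCHITECTURE orders here; with a
 * single emulated CPU, sensing any non-zero CPU address returns cc 3,
 * while a successful SENSE stores status 0 in the low half of r1 and sets
 * cc 1.
 */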
uint32_t HELPER(sigp)(uint64_t order_code, uint32_t r1, uint64_t cpu_addr)
{
    int cc = 0;

    HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
               __FUNCTION__, order_code, r1, cpu_addr);

    /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
       as parameter (input). Status (output) is always R1. */

    switch (order_code) {
    case SIGP_SET_ARCH:
        /* switch arch */
        break;
    case SIGP_SENSE:
        /* enumerate CPU status */
        if (cpu_addr) {
            /* XXX implement when SMP comes */
            return 3;
        }
        env->regs[r1] &= 0xffffffff00000000ULL;
        cc = 1;
        break;
    default:
        /* unknown sigp */
        fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
        cc = 3;
    }

    return cc;
}

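/*
 * SET ADDRESS SPACE CONTROL: the new mode is encoded in bits of the
 * second-operand address (a1 & 0xf00): 0x000 primary, 0x100 secondary,
 * 0x300 home.  Anything else, including access-register mode (0x200),
 * falls into the default case and raises a specification exception here.
 */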
void HELPER(sacf)(uint64_t a1)
{
    HELPER_LOG("%s: %16" PRIx64 "\n", __FUNCTION__, a1);

    switch (a1 & 0xf00) {
    case 0x000:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_PRIMARY;
        break;
    case 0x100:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_SECONDARY;
        break;
    case 0x300:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_HOME;
        break;
    default:
        qemu_log("unknown sacf mode: %" PRIx64 "\n", a1);
        program_interrupt(env, PGM_SPECIFICATION, 2);
        break;
    }
}

/* invalidate pte */
void HELPER(ipte)(uint64_t pte_addr, uint64_t vaddr)
{
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte = 0;

    /* XXX broadcast to other CPUs */

    /* XXX Linux is nice enough to give us the exact pte address.
       According to spec we'd have to find it out ourselves */
    /* XXX Linux is fine with overwriting the pte, the spec requires
       us to only set the invalid bit */
    stq_phys(pte_addr, pte | _PAGE_INVALID);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    tlb_flush_page(env, page);
}

/* flush local tlb */
void HELPER(ptlb)(void)
{
    tlb_flush(env, 1);
}

/* store using real address */
void HELPER(stura)(uint64_t addr, uint32_t v1)
{
    /* STURA stores a 32-bit word, so use a 4-byte store */
    stl_phys(get_address(0, 0, addr), v1);
}

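/*
 * LOAD REAL ADDRESS: on a successful translation the real address is placed
 * in r1 (only the low 32 bits in 24/31-bit mode) with cc 0; if translation
 * fails, cc is 3 and r1 instead receives the program-interruption code with
 * bit 32 set (0x80000000).
 */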
/* load real address */
uint32_t HELPER(lra)(uint64_t addr, uint32_t r1)
{
    uint32_t cc = 0;
    int old_exc = env->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int flags;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        program_interrupt(env, PGM_SPECIAL_OP, 2);
    }

    env->exception_index = old_exc;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
        cc = 3;
    }
    if (env->exception_index == EXCP_PGM) {
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    env->exception_index = old_exc;

    if (!(env->psw.mask & PSW_MASK_64)) {
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
                        (ret & 0xffffffffULL);
    } else {
        env->regs[r1] = ret;
    }

    return cc;
}

#endif